In [2]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
In [3]:
cd drive/MyDrive/project_files/
/content/drive/MyDrive/project_files
In [4]:
ls
csv2object_and_unetDemo.ipynb  project_files/  saved_models/  visualize.py
data/                          __pycache__/    utils.py
In [5]:
import csv
import json
import os
from PIL import Image

import pprint

import skimage.draw

import visualize
import utils as my_utils

import matplotlib.pyplot as plt
import cv2

import numpy as np

import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from IPython.display import clear_output

Data structures

In [6]:
# global context variables
anno_path = './data/csv' # a folder of annotation files
img_path = './data/images' # a folder of image files
In [7]:
class annotation:
    # label: the class label 
    # xcoords: list of x-coordinates of a polygon
    # ycoords: list of y-coordinates of a polygon
    def __str__(self):
        return '* label: ' + self.label +  '\n  xcoords: ' + str(self.xcoords) +  '\n  ycoords: ' + str(self.ycoords) + '\n'
    def __repr__(self):
        return '* label: ' + self.label +  '\n  xcoords: ' + str(self.xcoords) +  '\n  ycoords: ' + str(self.ycoords) + '\n'
In [8]:
class img_meta:
    # img_name: image file name
    # annotations: the annotation list
    
    def __str__(self):
        return 'img_name: ' + self.img_name +  ', annotations: \n' + str(self.annotations)
    def __repr__(self):
        return 'img_name: ' + self.img_name +  ', annotations: \n' + str(self.annotations)
    
    def parse(self, anno_file_name):
        anno_file_path = os.path.join(anno_path, anno_file_name)        
        with open(anno_file_path, 'r') as file:
            csvReader = csv.DictReader(file)
            self.annotations = []
            for idx, rows in enumerate(csvReader):
                anno_obj = annotation()
                if idx == 0: self.img_name = rows['filename'] # get image file name from the first row
                region_attributes = rows['region_attributes']
                region_attributes = json.loads(region_attributes) # convert to dict
                anno_obj.label = region_attributes['class'] # get annotation class
                region_shape_attributes = json.loads(rows['region_shape_attributes'])
                anno_obj.xcoords = region_shape_attributes['all_points_x']
                anno_obj.ycoords = region_shape_attributes['all_points_y']
                self.annotations.append(anno_obj)
    
    #This function uses opencv rather than PIL to load image.
    #PIL has issues with the image channel
    def load_image(self):
        
        img_file_path = os.path.join(img_path, self.img_name)
        
        """Load the specified image and return a [H,W,3] Numpy array.
        """
        # Load image
        image = cv2.imread(img_file_path)
        # If grayscale. Convert to RGB for consistency.
        if image.ndim != 3:
            image = skimage.color.gray2rgb(image)
        # If has an alpha channel, remove it for consistency
        if image.shape[-1] == 4:
            image = image[..., :3]

        return image
    #horizontal image augmentation 
    def horizontal_image_augmentation(self, anno_file_name,image_width_size):
        anno_file_path = os.path.join(anno_path, anno_file_name)        
        with open(anno_file_path, 'r') as file:
            csvReader = csv.DictReader(file)
            self.annotations = []
            for idx, rows in enumerate(csvReader):
                anno_obj = annotation()
                if idx == 0: self.img_name = rows['filename'] # get image file name from the first row
                region_attributes = rows['region_attributes']
                region_attributes = json.loads(region_attributes) # convert to dict
                anno_obj.label = region_attributes['class'] # get annotation class
                region_shape_attributes = json.loads(rows['region_shape_attributes'])
                anno_obj.xcoords = region_shape_attributes['all_points_x']
                anno_obj.ycoords = region_shape_attributes['all_points_y']
                #print(type(anno_obj.xcoords))
                anno_obj.xcoords = [image_width_size-x for x in anno_obj.xcoords]
  
                self.annotations.append(anno_obj)
    #vertical image augmentation 
    def vertical_image_augmentation(self, anno_file_name,image_height_size):
        anno_file_path = os.path.join(anno_path, anno_file_name)        
        with open(anno_file_path, 'r') as file:
            csvReader = csv.DictReader(file)
            self.annotations = []
            for idx, rows in enumerate(csvReader):
                anno_obj = annotation()
                if idx == 0: self.img_name = rows['filename'] # get image file name from the first row
                region_attributes = rows['region_attributes']
                region_attributes = json.loads(region_attributes) # convert to dict
                anno_obj.label = region_attributes['class'] # get annotation class
                region_shape_attributes = json.loads(rows['region_shape_attributes'])
                anno_obj.xcoords = region_shape_attributes['all_points_x']
                anno_obj.ycoords = region_shape_attributes['all_points_y']
                #print(type(anno_obj.xcoords))
                anno_obj.ycoords = [image_height_size-x for x in anno_obj.ycoords]
  
                self.annotations.append(anno_obj)

Parse an annotation file

In [9]:
# test the parsing of one annotation file
test_anno_file = 'Adityap_via_project_42.csv'

img_meta_obj = img_meta()
img_meta_obj.parse(test_anno_file)
#print(type(img_meta_obj))
pprint.pprint(img_meta_obj)


img_obj = img_meta_obj.load_image()
image_shape=img_obj.shape
print(image_shape[0],image_shape[1])
#print(img_obj.shape)
print("###########")
img_meta_obj.horizontal_image_augmentation(test_anno_file,image_shape[0])
pprint.pprint(img_meta_obj)


print("###########")
img_meta_obj.vertical_image_augmentation(test_anno_file,image_shape[1])
pprint.pprint(img_meta_obj)
img_name: aditya-42.png, annotations: 
[* label: single-colony
  xcoords: [90, 90, 89, 88, 88, 88, 89, 90, 90]
  ycoords: [73, 74, 75, 74, 73, 72, 72, 71, 73]
, * label: single-colony
  xcoords: [109, 110, 110, 110, 110, 109, 108, 108]
  ycoords: [59, 60, 60, 61, 63, 63, 61, 60]
, * label: single-colony
  xcoords: [70, 70, 69, 69, 69, 69, 69, 70, 70, 71, 72, 72, 73, 73, 73, 72, 72]
  ycoords: [168, 169, 170, 170, 171, 171, 172, 173, 173, 173, 173, 172, 171, 170, 169, 168, 168]
, * label: single-colony
  xcoords: [54, 55, 56, 56, 56, 55]
  ycoords: [156, 154, 155, 157, 158, 157]
, * label: single-colony
  xcoords: [33, 34, 33, 32, 31, 32, 33]
  ycoords: [159, 160, 162, 161, 160, 159, 159]
, * label: single-colony
  xcoords: [42, 43, 43, 44, 44, 44, 43, 42]
  ycoords: [156, 155, 154, 154, 155, 156, 157, 156]
, * label: single-colony
  xcoords: [115, 116, 117, 117, 117, 116, 115]
  ycoords: [167, 167, 168, 169, 170, 170, 170]
, * label: single-colony
  xcoords: [75, 75, 74, 75, 75, 76, 76, 77, 77, 77, 76, 75]
  ycoords: [167, 168, 169, 170, 172, 173, 172, 171, 169, 168, 167, 166]
, * label: amorphous-colony
  xcoords: [138, 138, 137, 137, 137, 136, 136, 135, 135, 135, 135, 134, 134, 135, 135, 136, 136, 136, 137, 137, 137, 137, 138, 138, 138, 139, 139, 139, 140, 140, 140, 140, 139, 139, 139, 139, 138, 138]
  ycoords: [218, 217, 216, 216, 217, 217, 218, 218, 217, 216, 215, 215, 215, 214, 214, 214, 213, 212, 212, 212, 212, 213, 214, 214, 214, 214, 214, 215, 215, 216, 216, 217, 217, 218, 218, 218, 218, 218]
, * label: single-colony
  xcoords: [82, 82, 82, 82, 83, 84, 84, 85, 85, 85, 84, 83, 83, 82, 81, 81]
  ycoords: [73, 74, 75, 76, 76, 76, 75, 75, 74, 73, 73, 72, 71, 71, 72, 73]
, * label: single-colony
  xcoords: [83, 84, 85, 85, 85, 84, 84, 83, 83, 83]
  ycoords: [77, 77, 77, 78, 78, 79, 79, 79, 78, 77]
, * label: single-colony
  xcoords: [96, 96, 96, 97, 97, 97, 97]
  ycoords: [90, 91, 92, 93, 91, 90, 90]
, * label: amorphous-colony
  xcoords: [134, 134, 134, 135, 135, 135, 134, 134, 133, 133, 132, 131, 131, 131, 131, 131, 132, 132, 133]
  ycoords: [213, 212, 212, 211, 211, 210, 210, 210, 209, 209, 209, 210, 210, 211, 212, 212, 212, 211, 211]
, * label: single-colony
  xcoords: [145, 147, 147, 148, 148, 148, 148, 147, 145, 144, 144, 144, 145]
  ycoords: [183, 184, 184, 183, 183, 181, 180, 180, 180, 180, 182, 183, 184]
, * label: amorphous-colony
  xcoords: [181, 183, 184, 184, 185, 186, 186, 185, 185, 185, 186, 187, 187, 187, 188, 188, 188, 189, 188, 188, 188, 187, 189, 189, 189, 189, 187, 187, 187, 186, 185, 184, 183, 182, 181, 181, 181, 180, 182]
  ycoords: [159, 159, 158, 156, 156, 156, 157, 158, 158, 159, 159, 159, 158, 157, 157, 157, 158, 159, 159, 160, 160, 161, 161, 161, 162, 162, 162, 162, 163, 164, 164, 164, 163, 163, 162, 161, 161, 159, 159]
, * label: amorphous-colony
  xcoords: [177, 175, 174, 173, 171, 168, 169, 171, 173, 174, 174, 175, 176, 177, 177, 177]
  ycoords: [158, 159, 160, 160, 159, 159, 157, 156, 156, 156, 157, 157, 156, 156, 157, 158]
, * label: amorphous-colony
  xcoords: [201, 203, 203, 204, 205, 204, 205, 205, 206, 207, 208, 208, 208, 208, 208, 207, 207, 206, 206, 205, 204, 204, 203, 203, 202, 201, 200, 200, 201]
  ycoords: [117, 118, 118, 118, 119, 120, 120, 120, 120, 119, 118, 118, 118, 117, 116, 116, 115, 115, 115, 115, 114, 114, 114, 115, 116, 116, 116, 117, 117]
, * label: single-colony
  xcoords: [168, 169, 169, 170, 170, 170, 170, 169, 169, 168, 168, 168]
  ycoords: [178, 178, 177, 177, 178, 179, 180, 180, 180, 180, 180, 180]
, * label: amorphous-colony
  xcoords: [247, 245, 246, 246, 247, 247, 248, 249, 249, 249, 248, 248, 248, 249, 249, 249, 249, 249, 249, 248, 248, 248, 247]
  ycoords: [147, 150, 150, 151, 151, 151, 151, 152, 151, 150, 150, 149, 149, 148, 147, 147, 147, 147, 146, 146, 146, 146, 146]
, * label: single-colony
  xcoords: [242, 242, 242, 243, 241, 241, 239, 239, 240]
  ycoords: [163, 163, 164, 165, 166, 166, 166, 164, 164]
, * label: single-colony
  xcoords: [124, 124, 125, 125, 125]
  ycoords: [207, 206, 205, 206, 207]
, * label: single-colony
  xcoords: [179, 178, 177, 177, 176, 176, 177, 178]
  ycoords: [136, 137, 137, 136, 136, 135, 135, 135]
, * label: single-colony
  xcoords: [140, 140, 140, 140, 141, 141, 141]
  ycoords: [58, 59, 60, 61, 61, 60, 58]
, * label: single-colony
  xcoords: [116, 117, 117, 118, 117, 116, 116]
  ycoords: [119, 120, 119, 118, 117, 117, 116]
, * label: single-colony
  xcoords: [65, 64, 65, 66, 67, 69, 69, 68, 66]
  ycoords: [193, 194, 195, 196, 196, 196, 195, 193, 193]
, * label: single-colony
  xcoords: [171, 172, 173, 174, 175, 176, 175, 174, 173, 171, 170]
  ycoords: [163, 163, 163, 163, 163, 165, 166, 166, 166, 165, 163]
, * label: single-colony
  xcoords: [114, 114, 115, 116, 117, 118, 118, 117]
  ycoords: [163, 164, 165, 165, 163, 163, 161, 161]
, * label: merged-colony
  xcoords: [144, 144, 144, 144, 144, 145, 145, 145, 145, 145, 145, 145, 146, 146, 146, 146, 145, 145]
  ycoords: [204, 205, 206, 207, 208, 208, 208, 207, 206, 206, 205, 205, 204, 204, 204, 203, 203, 203]
, * label: single-colony
  xcoords: [149, 148, 148, 148, 149, 149, 150, 150, 150, 150, 150]
  ycoords: [206, 207, 207, 208, 208, 208, 208, 207, 207, 206, 206]
, * label: single-colony
  xcoords: [177, 177, 177, 177, 178, 178, 178, 178, 178, 179, 179, 179, 178]
  ycoords: [67, 68, 68, 69, 69, 69, 69, 68, 68, 68, 68, 67, 67]
, * label: single-colony
  xcoords: [203, 202, 202, 202, 203, 203, 203, 204, 204, 204, 204]
  ycoords: [81, 81, 80, 80, 80, 80, 80, 80, 80, 81, 81]
, * label: single-colony
  xcoords: [203, 203, 202, 202, 202, 203, 203, 204, 204, 204]
  ycoords: [82, 82, 83, 83, 84, 84, 84, 84, 83, 82]
, * label: single-colony
  xcoords: [164, 164, 164, 164, 165, 165, 165]
  ycoords: [211, 212, 212, 213, 214, 213, 212]
, * label: single-colony
  xcoords: [79, 79, 79, 79, 79, 79, 79, 80, 80, 80, 80, 80]
  ycoords: [167, 168, 169, 169, 169, 170, 170, 171, 170, 169, 169, 169]
, * label: single-colony
  xcoords: [103, 103, 103, 102, 103, 104, 104, 104, 104, 104]
  ycoords: [193, 194, 195, 196, 196, 196, 196, 196, 195, 194]
, * label: single-colony
  xcoords: [91, 90, 90, 90, 91, 91, 92]
  ycoords: [203, 203, 204, 204, 204, 204, 203]
, * label: single-colony
  xcoords: [209, 209, 208, 208, 209, 209, 210, 210, 210, 210]
  ycoords: [80, 80, 81, 81, 82, 82, 82, 82, 81, 80]
]
292 292
###########
img_name: aditya-42.png, annotations: 
[* label: single-colony
  xcoords: [202, 202, 203, 204, 204, 204, 203, 202, 202]
  ycoords: [73, 74, 75, 74, 73, 72, 72, 71, 73]
, * label: single-colony
  xcoords: [183, 182, 182, 182, 182, 183, 184, 184]
  ycoords: [59, 60, 60, 61, 63, 63, 61, 60]
, * label: single-colony
  xcoords: [222, 222, 223, 223, 223, 223, 223, 222, 222, 221, 220, 220, 219, 219, 219, 220, 220]
  ycoords: [168, 169, 170, 170, 171, 171, 172, 173, 173, 173, 173, 172, 171, 170, 169, 168, 168]
, * label: single-colony
  xcoords: [238, 237, 236, 236, 236, 237]
  ycoords: [156, 154, 155, 157, 158, 157]
, * label: single-colony
  xcoords: [259, 258, 259, 260, 261, 260, 259]
  ycoords: [159, 160, 162, 161, 160, 159, 159]
, * label: single-colony
  xcoords: [250, 249, 249, 248, 248, 248, 249, 250]
  ycoords: [156, 155, 154, 154, 155, 156, 157, 156]
, * label: single-colony
  xcoords: [177, 176, 175, 175, 175, 176, 177]
  ycoords: [167, 167, 168, 169, 170, 170, 170]
, * label: single-colony
  xcoords: [217, 217, 218, 217, 217, 216, 216, 215, 215, 215, 216, 217]
  ycoords: [167, 168, 169, 170, 172, 173, 172, 171, 169, 168, 167, 166]
, * label: amorphous-colony
  xcoords: [154, 154, 155, 155, 155, 156, 156, 157, 157, 157, 157, 158, 158, 157, 157, 156, 156, 156, 155, 155, 155, 155, 154, 154, 154, 153, 153, 153, 152, 152, 152, 152, 153, 153, 153, 153, 154, 154]
  ycoords: [218, 217, 216, 216, 217, 217, 218, 218, 217, 216, 215, 215, 215, 214, 214, 214, 213, 212, 212, 212, 212, 213, 214, 214, 214, 214, 214, 215, 215, 216, 216, 217, 217, 218, 218, 218, 218, 218]
, * label: single-colony
  xcoords: [210, 210, 210, 210, 209, 208, 208, 207, 207, 207, 208, 209, 209, 210, 211, 211]
  ycoords: [73, 74, 75, 76, 76, 76, 75, 75, 74, 73, 73, 72, 71, 71, 72, 73]
, * label: single-colony
  xcoords: [209, 208, 207, 207, 207, 208, 208, 209, 209, 209]
  ycoords: [77, 77, 77, 78, 78, 79, 79, 79, 78, 77]
, * label: single-colony
  xcoords: [196, 196, 196, 195, 195, 195, 195]
  ycoords: [90, 91, 92, 93, 91, 90, 90]
, * label: amorphous-colony
  xcoords: [158, 158, 158, 157, 157, 157, 158, 158, 159, 159, 160, 161, 161, 161, 161, 161, 160, 160, 159]
  ycoords: [213, 212, 212, 211, 211, 210, 210, 210, 209, 209, 209, 210, 210, 211, 212, 212, 212, 211, 211]
, * label: single-colony
  xcoords: [147, 145, 145, 144, 144, 144, 144, 145, 147, 148, 148, 148, 147]
  ycoords: [183, 184, 184, 183, 183, 181, 180, 180, 180, 180, 182, 183, 184]
, * label: amorphous-colony
  xcoords: [111, 109, 108, 108, 107, 106, 106, 107, 107, 107, 106, 105, 105, 105, 104, 104, 104, 103, 104, 104, 104, 105, 103, 103, 103, 103, 105, 105, 105, 106, 107, 108, 109, 110, 111, 111, 111, 112, 110]
  ycoords: [159, 159, 158, 156, 156, 156, 157, 158, 158, 159, 159, 159, 158, 157, 157, 157, 158, 159, 159, 160, 160, 161, 161, 161, 162, 162, 162, 162, 163, 164, 164, 164, 163, 163, 162, 161, 161, 159, 159]
, * label: amorphous-colony
  xcoords: [115, 117, 118, 119, 121, 124, 123, 121, 119, 118, 118, 117, 116, 115, 115, 115]
  ycoords: [158, 159, 160, 160, 159, 159, 157, 156, 156, 156, 157, 157, 156, 156, 157, 158]
, * label: amorphous-colony
  xcoords: [91, 89, 89, 88, 87, 88, 87, 87, 86, 85, 84, 84, 84, 84, 84, 85, 85, 86, 86, 87, 88, 88, 89, 89, 90, 91, 92, 92, 91]
  ycoords: [117, 118, 118, 118, 119, 120, 120, 120, 120, 119, 118, 118, 118, 117, 116, 116, 115, 115, 115, 115, 114, 114, 114, 115, 116, 116, 116, 117, 117]
, * label: single-colony
  xcoords: [124, 123, 123, 122, 122, 122, 122, 123, 123, 124, 124, 124]
  ycoords: [178, 178, 177, 177, 178, 179, 180, 180, 180, 180, 180, 180]
, * label: amorphous-colony
  xcoords: [45, 47, 46, 46, 45, 45, 44, 43, 43, 43, 44, 44, 44, 43, 43, 43, 43, 43, 43, 44, 44, 44, 45]
  ycoords: [147, 150, 150, 151, 151, 151, 151, 152, 151, 150, 150, 149, 149, 148, 147, 147, 147, 147, 146, 146, 146, 146, 146]
, * label: single-colony
  xcoords: [50, 50, 50, 49, 51, 51, 53, 53, 52]
  ycoords: [163, 163, 164, 165, 166, 166, 166, 164, 164]
, * label: single-colony
  xcoords: [168, 168, 167, 167, 167]
  ycoords: [207, 206, 205, 206, 207]
, * label: single-colony
  xcoords: [113, 114, 115, 115, 116, 116, 115, 114]
  ycoords: [136, 137, 137, 136, 136, 135, 135, 135]
, * label: single-colony
  xcoords: [152, 152, 152, 152, 151, 151, 151]
  ycoords: [58, 59, 60, 61, 61, 60, 58]
, * label: single-colony
  xcoords: [176, 175, 175, 174, 175, 176, 176]
  ycoords: [119, 120, 119, 118, 117, 117, 116]
, * label: single-colony
  xcoords: [227, 228, 227, 226, 225, 223, 223, 224, 226]
  ycoords: [193, 194, 195, 196, 196, 196, 195, 193, 193]
, * label: single-colony
  xcoords: [121, 120, 119, 118, 117, 116, 117, 118, 119, 121, 122]
  ycoords: [163, 163, 163, 163, 163, 165, 166, 166, 166, 165, 163]
, * label: single-colony
  xcoords: [178, 178, 177, 176, 175, 174, 174, 175]
  ycoords: [163, 164, 165, 165, 163, 163, 161, 161]
, * label: merged-colony
  xcoords: [148, 148, 148, 148, 148, 147, 147, 147, 147, 147, 147, 147, 146, 146, 146, 146, 147, 147]
  ycoords: [204, 205, 206, 207, 208, 208, 208, 207, 206, 206, 205, 205, 204, 204, 204, 203, 203, 203]
, * label: single-colony
  xcoords: [143, 144, 144, 144, 143, 143, 142, 142, 142, 142, 142]
  ycoords: [206, 207, 207, 208, 208, 208, 208, 207, 207, 206, 206]
, * label: single-colony
  xcoords: [115, 115, 115, 115, 114, 114, 114, 114, 114, 113, 113, 113, 114]
  ycoords: [67, 68, 68, 69, 69, 69, 69, 68, 68, 68, 68, 67, 67]
, * label: single-colony
  xcoords: [89, 90, 90, 90, 89, 89, 89, 88, 88, 88, 88]
  ycoords: [81, 81, 80, 80, 80, 80, 80, 80, 80, 81, 81]
, * label: single-colony
  xcoords: [89, 89, 90, 90, 90, 89, 89, 88, 88, 88]
  ycoords: [82, 82, 83, 83, 84, 84, 84, 84, 83, 82]
, * label: single-colony
  xcoords: [128, 128, 128, 128, 127, 127, 127]
  ycoords: [211, 212, 212, 213, 214, 213, 212]
, * label: single-colony
  xcoords: [213, 213, 213, 213, 213, 213, 213, 212, 212, 212, 212, 212]
  ycoords: [167, 168, 169, 169, 169, 170, 170, 171, 170, 169, 169, 169]
, * label: single-colony
  xcoords: [189, 189, 189, 190, 189, 188, 188, 188, 188, 188]
  ycoords: [193, 194, 195, 196, 196, 196, 196, 196, 195, 194]
, * label: single-colony
  xcoords: [201, 202, 202, 202, 201, 201, 200]
  ycoords: [203, 203, 204, 204, 204, 204, 203]
, * label: single-colony
  xcoords: [83, 83, 84, 84, 83, 83, 82, 82, 82, 82]
  ycoords: [80, 80, 81, 81, 82, 82, 82, 82, 81, 80]
]
###########
img_name: aditya-42.png, annotations: 
[* label: single-colony
  xcoords: [90, 90, 89, 88, 88, 88, 89, 90, 90]
  ycoords: [219, 218, 217, 218, 219, 220, 220, 221, 219]
, * label: single-colony
  xcoords: [109, 110, 110, 110, 110, 109, 108, 108]
  ycoords: [233, 232, 232, 231, 229, 229, 231, 232]
, * label: single-colony
  xcoords: [70, 70, 69, 69, 69, 69, 69, 70, 70, 71, 72, 72, 73, 73, 73, 72, 72]
  ycoords: [124, 123, 122, 122, 121, 121, 120, 119, 119, 119, 119, 120, 121, 122, 123, 124, 124]
, * label: single-colony
  xcoords: [54, 55, 56, 56, 56, 55]
  ycoords: [136, 138, 137, 135, 134, 135]
, * label: single-colony
  xcoords: [33, 34, 33, 32, 31, 32, 33]
  ycoords: [133, 132, 130, 131, 132, 133, 133]
, * label: single-colony
  xcoords: [42, 43, 43, 44, 44, 44, 43, 42]
  ycoords: [136, 137, 138, 138, 137, 136, 135, 136]
, * label: single-colony
  xcoords: [115, 116, 117, 117, 117, 116, 115]
  ycoords: [125, 125, 124, 123, 122, 122, 122]
, * label: single-colony
  xcoords: [75, 75, 74, 75, 75, 76, 76, 77, 77, 77, 76, 75]
  ycoords: [125, 124, 123, 122, 120, 119, 120, 121, 123, 124, 125, 126]
, * label: amorphous-colony
  xcoords: [138, 138, 137, 137, 137, 136, 136, 135, 135, 135, 135, 134, 134, 135, 135, 136, 136, 136, 137, 137, 137, 137, 138, 138, 138, 139, 139, 139, 140, 140, 140, 140, 139, 139, 139, 139, 138, 138]
  ycoords: [74, 75, 76, 76, 75, 75, 74, 74, 75, 76, 77, 77, 77, 78, 78, 78, 79, 80, 80, 80, 80, 79, 78, 78, 78, 78, 78, 77, 77, 76, 76, 75, 75, 74, 74, 74, 74, 74]
, * label: single-colony
  xcoords: [82, 82, 82, 82, 83, 84, 84, 85, 85, 85, 84, 83, 83, 82, 81, 81]
  ycoords: [219, 218, 217, 216, 216, 216, 217, 217, 218, 219, 219, 220, 221, 221, 220, 219]
, * label: single-colony
  xcoords: [83, 84, 85, 85, 85, 84, 84, 83, 83, 83]
  ycoords: [215, 215, 215, 214, 214, 213, 213, 213, 214, 215]
, * label: single-colony
  xcoords: [96, 96, 96, 97, 97, 97, 97]
  ycoords: [202, 201, 200, 199, 201, 202, 202]
, * label: amorphous-colony
  xcoords: [134, 134, 134, 135, 135, 135, 134, 134, 133, 133, 132, 131, 131, 131, 131, 131, 132, 132, 133]
  ycoords: [79, 80, 80, 81, 81, 82, 82, 82, 83, 83, 83, 82, 82, 81, 80, 80, 80, 81, 81]
, * label: single-colony
  xcoords: [145, 147, 147, 148, 148, 148, 148, 147, 145, 144, 144, 144, 145]
  ycoords: [109, 108, 108, 109, 109, 111, 112, 112, 112, 112, 110, 109, 108]
, * label: amorphous-colony
  xcoords: [181, 183, 184, 184, 185, 186, 186, 185, 185, 185, 186, 187, 187, 187, 188, 188, 188, 189, 188, 188, 188, 187, 189, 189, 189, 189, 187, 187, 187, 186, 185, 184, 183, 182, 181, 181, 181, 180, 182]
  ycoords: [133, 133, 134, 136, 136, 136, 135, 134, 134, 133, 133, 133, 134, 135, 135, 135, 134, 133, 133, 132, 132, 131, 131, 131, 130, 130, 130, 130, 129, 128, 128, 128, 129, 129, 130, 131, 131, 133, 133]
, * label: amorphous-colony
  xcoords: [177, 175, 174, 173, 171, 168, 169, 171, 173, 174, 174, 175, 176, 177, 177, 177]
  ycoords: [134, 133, 132, 132, 133, 133, 135, 136, 136, 136, 135, 135, 136, 136, 135, 134]
, * label: amorphous-colony
  xcoords: [201, 203, 203, 204, 205, 204, 205, 205, 206, 207, 208, 208, 208, 208, 208, 207, 207, 206, 206, 205, 204, 204, 203, 203, 202, 201, 200, 200, 201]
  ycoords: [175, 174, 174, 174, 173, 172, 172, 172, 172, 173, 174, 174, 174, 175, 176, 176, 177, 177, 177, 177, 178, 178, 178, 177, 176, 176, 176, 175, 175]
, * label: single-colony
  xcoords: [168, 169, 169, 170, 170, 170, 170, 169, 169, 168, 168, 168]
  ycoords: [114, 114, 115, 115, 114, 113, 112, 112, 112, 112, 112, 112]
, * label: amorphous-colony
  xcoords: [247, 245, 246, 246, 247, 247, 248, 249, 249, 249, 248, 248, 248, 249, 249, 249, 249, 249, 249, 248, 248, 248, 247]
  ycoords: [145, 142, 142, 141, 141, 141, 141, 140, 141, 142, 142, 143, 143, 144, 145, 145, 145, 145, 146, 146, 146, 146, 146]
, * label: single-colony
  xcoords: [242, 242, 242, 243, 241, 241, 239, 239, 240]
  ycoords: [129, 129, 128, 127, 126, 126, 126, 128, 128]
, * label: single-colony
  xcoords: [124, 124, 125, 125, 125]
  ycoords: [85, 86, 87, 86, 85]
, * label: single-colony
  xcoords: [179, 178, 177, 177, 176, 176, 177, 178]
  ycoords: [156, 155, 155, 156, 156, 157, 157, 157]
, * label: single-colony
  xcoords: [140, 140, 140, 140, 141, 141, 141]
  ycoords: [234, 233, 232, 231, 231, 232, 234]
, * label: single-colony
  xcoords: [116, 117, 117, 118, 117, 116, 116]
  ycoords: [173, 172, 173, 174, 175, 175, 176]
, * label: single-colony
  xcoords: [65, 64, 65, 66, 67, 69, 69, 68, 66]
  ycoords: [99, 98, 97, 96, 96, 96, 97, 99, 99]
, * label: single-colony
  xcoords: [171, 172, 173, 174, 175, 176, 175, 174, 173, 171, 170]
  ycoords: [129, 129, 129, 129, 129, 127, 126, 126, 126, 127, 129]
, * label: single-colony
  xcoords: [114, 114, 115, 116, 117, 118, 118, 117]
  ycoords: [129, 128, 127, 127, 129, 129, 131, 131]
, * label: merged-colony
  xcoords: [144, 144, 144, 144, 144, 145, 145, 145, 145, 145, 145, 145, 146, 146, 146, 146, 145, 145]
  ycoords: [88, 87, 86, 85, 84, 84, 84, 85, 86, 86, 87, 87, 88, 88, 88, 89, 89, 89]
, * label: single-colony
  xcoords: [149, 148, 148, 148, 149, 149, 150, 150, 150, 150, 150]
  ycoords: [86, 85, 85, 84, 84, 84, 84, 85, 85, 86, 86]
, * label: single-colony
  xcoords: [177, 177, 177, 177, 178, 178, 178, 178, 178, 179, 179, 179, 178]
  ycoords: [225, 224, 224, 223, 223, 223, 223, 224, 224, 224, 224, 225, 225]
, * label: single-colony
  xcoords: [203, 202, 202, 202, 203, 203, 203, 204, 204, 204, 204]
  ycoords: [211, 211, 212, 212, 212, 212, 212, 212, 212, 211, 211]
, * label: single-colony
  xcoords: [203, 203, 202, 202, 202, 203, 203, 204, 204, 204]
  ycoords: [210, 210, 209, 209, 208, 208, 208, 208, 209, 210]
, * label: single-colony
  xcoords: [164, 164, 164, 164, 165, 165, 165]
  ycoords: [81, 80, 80, 79, 78, 79, 80]
, * label: single-colony
  xcoords: [79, 79, 79, 79, 79, 79, 79, 80, 80, 80, 80, 80]
  ycoords: [125, 124, 123, 123, 123, 122, 122, 121, 122, 123, 123, 123]
, * label: single-colony
  xcoords: [103, 103, 103, 102, 103, 104, 104, 104, 104, 104]
  ycoords: [99, 98, 97, 96, 96, 96, 96, 96, 97, 98]
, * label: single-colony
  xcoords: [91, 90, 90, 90, 91, 91, 92]
  ycoords: [89, 89, 88, 88, 88, 88, 89]
, * label: single-colony
  xcoords: [209, 209, 208, 208, 209, 209, 210, 210, 210, 210]
  ycoords: [212, 212, 211, 211, 210, 210, 210, 210, 211, 212]
]
In [10]:
#pprint.pprint(img_meta_obj)
In [10]:
 
In [11]:
img_obj = img_meta_obj.load_image()
image_shape=img_obj.shape
print(image_shape[0],image_shape[1])
print(img_obj.shape)
visualize.display_images([img_obj])
292 292
(292, 292, 3)
In [12]:
#img_obj_vertical_flip = cv2.flip(img_obj, 0)
#visualize.display_images([img_obj_vertical_flip])  #show the vertically flipped image 

#img_obj_horizontal_flip = cv2.flip(img_obj, 1)
#visualize.display_images([img_obj_horizontal_flip])  #show the vertically flipped image 

Parse all annotation files

In [13]:
img_metas = []
file_name=[]

for file in os.listdir(anno_path):
    if file.endswith('.csv'):
        print('Parsing', file, '...')
        img_meta_obj = img_meta()
        file_name.append(file)
        img_meta_obj.parse(file)
        img_metas.append(img_meta_obj)
    else:
        print('[WARNING] Non-CSV file detected:', file)
Parsing Adityap_via_project_42.csv ...
Parsing ElSafadi-40.csv ...
Parsing Kedhar_via_project_58.csv ...
Parsing Harish_via_project_44.csv ...
Parsing Yuji_via_project_89.csv ...
Parsing Pagare-58.csv ...
Parsing Doppalapudi-20.csv ...
Parsing bonifacesindala_via_project_77.csv ...
Parsing Lee-32.csv ...
Parsing Kolla-30.csv ...
Parsing Pravasini_via_project_66.csv ...
Parsing sagar_via_project_43.csv ...
Parsing Bais-9.csv ...
Parsing Jing_via_project_74.csv ...
Parsing Bachamolla-25.csv ...
Parsing Alshahrani-21.csv ...
Parsing Madhu_via_project_55.csv ...
Parsing sagar_via_project_61 .csv ...
Parsing Trenton_via_project_65.csv ...
Parsing Chen-15.csv ...
Parsing Kolla-48.csv ...
Parsing sandeep_via_project_67.csv ...
Parsing Kedhar_via_project_76.csv ...
Parsing Brian-56.csv ...
Parsing Pravasini_via_project_48.csv ...
Parsing Brendan_via_project_66.csv ...
Parsing santosh_via_project_46.csv ...
Parsing santosh_via_project_64.csv ...
Parsing bonifacesindala_via_project_59.csv ...
Parsing Piyush_via_project_60.csv ...
Parsing Nreddy-55.csv ...
Parsing Kang-47.csv ...
Parsing Bachamolla-7.csv ...
Parsing Brian-38.csv ...
Parsing Piyush_via_project_78.csv ...
Parsing Dorasala-39 (jpg).csv ...
Parsing He-28.csv ...
Parsing Donga-19.csv ...
Parsing Jing_via_project_92.csv ...
Parsing Divvela-18.csv ...
Parsing He-46.csv ...
Parsing Foote-41.csv ...
Parsing Adityap_via_project_60.csv ...
Parsing Sucic_via_project_61.csv ...
Parsing Saylee_via_project_70 .csv ...
Parsing Agberebi-19.csv ...
Parsing Chen-33.csv ...
Parsing Agberebi-1.csv ...
Parsing Doppalapudi-38.csv ...
Parsing Pagare-40.csv ...
Parsing hamid_via_project_88.csv ...
Parsing Gundlapally-44.csv ...
Parsing Gundlapally-26.csv ...
Parsing Amulya_via_project_85 .csv ...
Parsing Krutul-31.csv ...
Parsing Saylee_via_project_52.csv ...
Parsing Foote-23.csv ...
Parsing Trenton_via_project_83.csv ...
Parsing Madhu_via_project_73.csv ...
Parsing Guvvala-27.csv ...
Parsing OLADRI-39.csv ...
Parsing Guvvala-45.csv ...
Parsing Ran Yan_via_project_76.csv ...
Parsing Glass-24.csv ...
Parsing Alshahrani-3.csv ...
Parsing Harish_via_project_62.csv ...
Parsing sandeep_via_project_49.csv ...
Parsing Brendan_via_project_84 .csv ...
Parsing Devanshi_via_project_63.csv ...
Parsing Ran Yan_via_project_94.csv ...
Parsing Sucic_via_project_79.csv ...
Parsing Lee-50.csv ...
Parsing Amulya_via_project_67.csv ...
Parsing Glass-42.csv ...
Parsing Divvela-36.csv ...
Parsing Yuji_via_project_71.csv ...
Parsing Krutul-49.csv ...
Parsing OLADRI-57.csv ...
Parsing Bais-27.csv ...
Parsing Dorasala-21 (jpg).csv ...
Parsing ElSafadi-22.csv ...
Parsing hamid_via_project_70.csv ...
Parsing Devanshi_via_project_45.csv ...
In [14]:
print(len(img_metas))
83
In [15]:
print(file_name[0])
Adityap_via_project_42.csv

Visualization

In [16]:
def load_mask(img_file, img_meta_obj):
    
    h = img_file.shape[0]
    w = img_file.shape[1]
    #print(w)
    #print(h)
    
    annotations = img_meta_obj.annotations
    #print(annotations)
    
    # create one array for all masks, each on a different channel
    masks = np.zeros([h, w, len(annotations)], dtype='uint8')
    #print(masks)
    
    #Draw polygons in empty mask
    for i, r in enumerate(annotations):
        
        class_label = r.label
        all_points_x = r.xcoords
        all_points_y = r.ycoords
        
        #assert (len(all_points_x) == len(all_points_y)), 'all_points_x != all_points_y'
        rr, cc = skimage.draw.polygon(all_points_y, all_points_x)
        masks[rr, cc, i] = 1
        
    return masks
In [17]:
# Visualize a random file

#sample_idx = np.random.choice(len(img_metas))
sample_idx = 1
print("Sample Index: ", sample_idx)

#Image Meta Data
img_meta_obj = img_metas[sample_idx]
print("Number of annotations: ", len(img_meta_obj.annotations))

img_file = img_meta_obj.load_image()
print("Image shape: ", img_file.shape)

masks = load_mask(img_file, img_meta_obj)
print("Mask shape: ", masks.shape)

visualize.display_masked_instances(img_file, img_meta_obj.annotations, masks)
Sample Index:  1
Number of annotations:  21
Image shape:  (266, 273, 3)
Mask shape:  (266, 273, 21)

Data Augmentation Part

In [18]:
# Visualize the augmentation files

#sample_idx = np.random.choice(len(img_metas))
for sample_idx in range(0,2):
#sample_idx = 1
    print("Sample Index: ", sample_idx)
    img_meta_obj = img_meta()
    img_meta_obj.parse(file_name[sample_idx])
    #Image Meta Data
    #img_meta_obj = img_metas[sample_idx]
    print("Number of annotations: ", len(img_meta_obj.annotations))

    img_file = img_meta_obj.load_image()
    print("Image shape: ", img_file.shape)

    masks = load_mask(img_file, img_meta_obj)
    print("Mask shape: ", masks.shape)

    visualize.display_masked_instances(img_file, img_meta_obj.annotations, masks)
    # test the parsing of one annotation file
    #test_anno_file = 'Adityap_via_project_42.csv'


    image_shape=img_file.shape
    img_obj_vertical_flip = cv2.flip(img_file, 0)
    #visualize.display_images([img_obj_vertical_flip])  #show the vertically flipped image 

    img_obj_horizontal_flip = cv2.flip(img_file, 1)
    #visualize.display_images([img_obj_horizontal_flip])  #show the horizotal flipped image 
    print(image_shape[0],image_shape[1])
    #print(img_obj.shape)
    print("###########")
    img_meta_obj_horizontal = img_meta()
    img_meta_obj_horizontal.horizontal_image_augmentation(file_name[sample_idx],image_shape[1])
    #pprint.pprint(img_meta_obj)

    #sample_idx = 2
    #print("Sample Index: ", sample_idx)

    #Image Meta Data
    #img_meta_obj = img_metas[sample_idx]
    print("Number of annotations: ", len(img_meta_obj_horizontal.annotations))

    img_file = img_meta_obj_horizontal.load_image()
    print("Image shape: ", img_file.shape)

    masks_hori = load_mask(img_obj_horizontal_flip, img_meta_obj_horizontal)
    print("Mask shape: ", masks_hori.shape)
    print("sample index=",sample_idx," after horizontal flipping")
    visualize.display_masked_instances(img_obj_horizontal_flip, img_meta_obj_horizontal.annotations, masks_hori)

    print("###########")
    img_meta_obj_vertical = img_meta()
    img_meta_obj_vertical.vertical_image_augmentation(file_name[sample_idx],image_shape[0])
    #pprint.pprint(img_meta_obj)
    masks_verti = load_mask(img_obj_vertical_flip, img_meta_obj_vertical)
    print("Mask shape: ", masks_verti.shape)
    print("sample index=",sample_idx,"after vertical flipping")
    visualize.display_masked_instances(img_obj_vertical_flip, img_meta_obj_vertical.annotations, masks_verti)
Sample Index:  0
Number of annotations:  37
Image shape:  (292, 292, 3)
Mask shape:  (292, 292, 37)
292 292
###########
Number of annotations:  37
Image shape:  (292, 292, 3)
Mask shape:  (292, 292, 37)
sample index= 0  after horizontal flipping
###########
Mask shape:  (292, 292, 37)
sample index= 0 after vertical flipping
Sample Index:  1
Number of annotations:  21
Image shape:  (266, 273, 3)
Mask shape:  (266, 273, 21)
266 273
###########
Number of annotations:  21
Image shape:  (266, 273, 3)
Mask shape:  (266, 273, 21)
sample index= 1  after horizontal flipping
###########
Mask shape:  (266, 273, 21)
sample index= 1 after vertical flipping

FC Densenet Demo

Data Processing

In [19]:
# Image size that we are going to use
IMG_SIZE = 256
# Our images are RGB (3 channels)
N_CHANNELS = 3
# Foreground and backgroung
N_CLASSES = 2
In [20]:
#Loads the data and creates a tuple of images
#masks to be later made into Tensorflow data object

def dataLoader():
    img_list = []
    mask_list = []
    
    
    for i in range(len(img_metas)):
        
        img_meta_obj = img_metas[i]
        #print(file_name[i])
        img_file = img_meta_obj.load_image()
        #print("Image shape: ", img_file.shape)
        
        #Convert mask to the shape of an image (3D) with channel=1
        masks = load_mask(img_file, img_meta_obj)
        #print("Masks shape: ", masks.shape)
        #Merge all separate masks to one mask
        mask = np.sum(masks, axis = -1)
        #print("Mask shape: ", mask.shape)
        mask = np.expand_dims(mask, axis = -1)
        #print("Mask shape: ", mask.shape)
        
        #Rezize mask and image to common size
        # Resize Image to 256x256
        img_file, _, scale, padding,_ = my_utils.resize_image(img_file, max_dim = IMG_SIZE, mode="square")
        #print("Image shape: ", img_file.shape)
        # Resize Mask to 256x256
        mask = my_utils.resize_mask(mask, scale, padding)
        #Fix Overlap bug
        mask = np.where(mask > 1, 1, mask)
        mask = mask.astype(np.float32)
        #print(np.amax(mask))
        #print("Mask shape: ", mask.shape)
        
        #Normalize Image
        img_file = img_file.astype(np.float32)
        img_file = img_file/255.0
        #print("Image shape: ", img_file.shape)
        

        img_list.append(img_file)
        mask_list.append(mask)

# for horizontal flip
        img_meta_obj = img_metas[i]
        
        img_file = img_meta_obj.load_image()
        image_shape=img_file.shape
        #print("Image shape: ", img_file.shape)
        #horizontal flip image file
        img_file = cv2.flip(img_file, 1)
        img_meta_obj_horizontal = img_meta()
        img_meta_obj_horizontal.horizontal_image_augmentation(file_name[i],image_shape[1])
   
        #Convert mask to the shape of an image (3D) with channel=1
        masks = load_mask(img_file, img_meta_obj_horizontal)
        #print("Masks shape: ", masks.shape)
        #Merge all separate masks to one mask
        mask = np.sum(masks, axis = -1)
        #print("Mask shape: ", mask.shape)
        mask = np.expand_dims(mask, axis = -1)
        #print("Mask shape: ", mask.shape)
        
        #Rezize mask and image to common size
        # Resize Image to 256x256
        img_file, _, scale, padding,_ = my_utils.resize_image(img_file, max_dim = IMG_SIZE, mode="square")
        #print("Image shape: ", img_file.shape)
        # Resize Mask to 256x256
        mask = my_utils.resize_mask(mask, scale, padding)
        #Fix Overlap bug
        mask = np.where(mask > 1, 1, mask)
        mask = mask.astype(np.float32)
        #print(np.amax(mask))
        #print("Mask shape: ", mask.shape)
        
        #Normalize Image
        img_file = img_file.astype(np.float32)
        img_file = img_file/255.0
        #print("Image shape: ", img_file.shape)
        

        img_list.append(img_file)
        mask_list.append(mask)

# for vertical flip
        img_meta_obj = img_metas[i]
        
        img_file = img_meta_obj.load_image()
        image_shape=img_file.shape
        #print("Image shape: ", img_file.shape)
        #vertical flip image file
        img_file = cv2.flip(img_file, 0)
        
        img_meta_obj_vertical = img_meta()
        img_meta_obj_vertical.vertical_image_augmentation(file_name[i],image_shape[0])
        
        #Convert mask to the shape of an image (3D) with channel=1
        masks = load_mask(img_file, img_meta_obj_vertical)
        #print("Masks shape: ", masks.shape)
        #Merge all separate masks to one mask
        mask = np.sum(masks, axis = -1)
        #print("Mask shape: ", mask.shape)
        mask = np.expand_dims(mask, axis = -1)
        #print("Mask shape: ", mask.shape)
        
        #Rezize mask and image to common size
        # Resize Image to 256x256
        img_file, _, scale, padding,_ = my_utils.resize_image(img_file, max_dim = IMG_SIZE, mode="square")
        #print("Image shape: ", img_file.shape)
        # Resize Mask to 256x256
        mask = my_utils.resize_mask(mask, scale, padding)
        #Fix Overlap bug
        mask = np.where(mask > 1, 1, mask)
        mask = mask.astype(np.float32)
        #print(np.amax(mask))
        #print("Mask shape: ", mask.shape)
        
        #Normalize Image
        img_file = img_file.astype(np.float32)
        img_file = img_file/255.0
        #print("Image shape: ", img_file.shape)
        

        img_list.append(img_file)
        mask_list.append(mask)



    return (img_list, mask_list)

Create Tensorflow Data Loader

In [21]:
dataset = tf.data.Dataset.from_tensor_slices(dataLoader())
In [22]:
print("Dataset size is= ",len(dataset))
Dataset size is=  249

Train/Validation Split

In [23]:
train_size = int(0.9 * len(img_metas))
val_size = int(0.1 * len(img_metas))

train_dataset = dataset.take(train_size)
val_dataset = dataset.skip(train_size)
In [23]:
 

Training

The following training code and model code was adapted from

https://yann-leguilly.gitlab.io/post/2019-12-14-tensorflow-tfdata-segmentation/

Data Tuning Parameters

In [24]:
BATCH_SIZE = 5
BUFFER_SIZE = 1000
# important for reproducibility
# this allows to generate the same random numbers
SEED = 42
AUTOTUNE = tf.data.experimental.AUTOTUNE
In [25]:
dataset = {"train": train_dataset, "val": val_dataset}

# -- Train Dataset --#
#dataset['train'] = dataset['train'].shuffle(buffer_size=BUFFER_SIZE, seed=SEED)
dataset['train'] = dataset['train'].repeat()
dataset['train'] = dataset['train'].batch(BATCH_SIZE)
dataset['train'] = dataset['train'].prefetch(buffer_size=AUTOTUNE)

#-- Validation Dataset --#
dataset['val'] = dataset['val'].repeat()
dataset['val'] = dataset['val'].batch(BATCH_SIZE)
dataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)

print(dataset['train'])
print(dataset['val'])

# how shuffle works: https://stackoverflow.com/a/53517848
<PrefetchDataset shapes: ((None, 256, 256, 3), (None, 256, 256, 1)), types: (tf.float32, tf.float32)>
<PrefetchDataset shapes: ((None, 256, 256, 3), (None, 256, 256, 1)), types: (tf.float32, tf.float32)>
In [26]:
def display_sample(display_list):
    """Show side-by-side an input image,
    the ground truth and the prediction.
    """
    plt.figure(figsize=(18, 18))

    title = ['Input Image', 'True Mask', 'Predicted Mask']

    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
        plt.axis('off')
    plt.show()
In [27]:
for image, mask in dataset['train'].take(1):
    sample_image, sample_mask = image, mask

display_sample([sample_image[0], sample_mask[0]])
In [28]:
# -- Keras Functional API -- #
# -- FC Densenet Implementation -- #
# Everything here is from tensorflow.keras.layers
# I imported tensorflow.keras.layers * to make it easier to read
dropout_rate = 0.2
input_size = (IMG_SIZE, IMG_SIZE, N_CHANNELS)

# If you want to know more about why we are using `he_normal`: 
# https://stats.stackexchange.com/questions/319323/whats-the-difference-between-variance-scaling-initializer-and-xavier-initialize/319849#319849  
# Or the excelent fastai course: 
# https://github.com/fastai/course-v3/blob/master/nbs/dl2/02b_initializing.ipynb
initializer = 'he_normal'


# -- Top Down -- #

inputs = Input(shape=input_size)
conv_enc_1 = Conv2D(256, 3,  padding='same', kernel_initializer=initializer)(inputs)

# layer 1
x1=BatchNormalization()(conv_enc_1)
x1 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x1)
x1 = tf.keras.layers.Dropout(dropout_rate)(x1)

concatenate_1 = concatenate([x1, conv_enc_1], axis = 3)


# Transition Down 1
td_x1=BatchNormalization()(concatenate_1)
td_x1 = Conv2D(256, 1, activation = 'relu', padding='same', kernel_initializer=initializer)(td_x1)
td_x1 = tf.keras.layers.Dropout(dropout_rate)(td_x1)
max_pool_enc_1 = MaxPooling2D(pool_size=(2, 2))(td_x1)

# layer 2
x2=BatchNormalization()(max_pool_enc_1)
x2 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x2)
x2 = tf.keras.layers.Dropout(dropout_rate)(x2)


concatenate_2 = concatenate([x2, max_pool_enc_1], axis = 3)


# Transition Down 2
td_x2=BatchNormalization()(concatenate_2)
td_x2 = Conv2D(256, 1, activation = 'relu', padding='same', kernel_initializer=initializer)(td_x2)
td_x2 = tf.keras.layers.Dropout(dropout_rate)(td_x2)
max_pool_enc_2 = MaxPooling2D(pool_size=(2, 2))(td_x2)



# layer 3
x3=BatchNormalization()(max_pool_enc_2)
x3 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x3)
x3 = tf.keras.layers.Dropout(dropout_rate)(x3)



#Transition UP 1
tu_x1 = Conv2DTranspose(256, 3, strides=(2,2), padding='same', kernel_initializer=initializer)(x3)


# layer 4
x4=BatchNormalization()(tu_x1)
x4 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x4)
x4 = tf.keras.layers.Dropout(dropout_rate)(x4)


#Transition UP 2
tu_x2 = Conv2DTranspose(256, 3, strides=(2,2), padding='same', kernel_initializer=initializer)(x4)

# layer 5
x5=BatchNormalization()(tu_x2)
x5 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x5)
x5 = tf.keras.layers.Dropout(dropout_rate)(x5)

output = Conv2D(256, 3,  padding='same', kernel_initializer=initializer)(x5)



#output = Conv2D(N_CLASSES, 1, activation = 'softmax')(conv_dec_4)

Model Saver Class

In [29]:
#This class will automatically save TF models
#at Epoch which are multiples of the SAVE_MULTIPLE parameter.
SAVE_MULTIPLE = 5

class ModelSaver(tf.keras.callbacks.Callback):
    
    def on_epoch_end(self, epoch, logs={}):
        if (epoch)%SAVE_MULTIPLE == 0:  # Save when epochs are multiples of SAVE_MULTIPLE.
            self.model.save(f"./saved_models/model_{epoch}.h5")

Create and Compile Model

In [30]:
model = tf.keras.Model(inputs = inputs, outputs = output)
In [31]:
model.compile(optimizer=Adam(learning_rate = 0.0001), loss = tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

Create moodel saver instance

In [32]:
# create and use callback:
saver = ModelSaver()

Model Summary

In [33]:
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 256, 256, 3) 0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 256, 256, 256 7168        input_1[0][0]                    
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 256, 256, 256 1024        conv2d[0][0]                     
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 256, 256, 256 590080      batch_normalization[0][0]        
__________________________________________________________________________________________________
dropout (Dropout)               (None, 256, 256, 256 0           conv2d_1[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 256, 256, 512 0           dropout[0][0]                    
                                                                 conv2d[0][0]                     
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 256, 256, 512 2048        concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 256, 256, 256 131328      batch_normalization_1[0][0]      
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 256, 256, 256 0           conv2d_2[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 128, 128, 256 0           dropout_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 128, 128, 256 1024        max_pooling2d[0][0]              
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 128, 128, 256 590080      batch_normalization_2[0][0]      
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 128, 128, 256 0           conv2d_3[0][0]                   
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 128, 128, 512 0           dropout_2[0][0]                  
                                                                 max_pooling2d[0][0]              
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 128, 128, 512 2048        concatenate_1[0][0]              
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 128, 128, 256 131328      batch_normalization_3[0][0]      
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 128, 128, 256 0           conv2d_4[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 64, 64, 256)  0           dropout_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 64, 64, 256)  1024        max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 64, 64, 256)  590080      batch_normalization_4[0][0]      
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 64, 64, 256)  0           conv2d_5[0][0]                   
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 128, 128, 256 590080      dropout_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 128, 128, 256 1024        conv2d_transpose[0][0]           
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 128, 128, 256 590080      batch_normalization_5[0][0]      
__________________________________________________________________________________________________
dropout_5 (Dropout)             (None, 128, 128, 256 0           conv2d_6[0][0]                   
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 256, 256, 256 590080      dropout_5[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 256, 256, 256 1024        conv2d_transpose_1[0][0]         
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 256, 256, 256 590080      batch_normalization_6[0][0]      
__________________________________________________________________________________________________
dropout_6 (Dropout)             (None, 256, 256, 256 0           conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 256, 256, 256 590080      dropout_6[0][0]                  
==================================================================================================
Total params: 4,999,680
Trainable params: 4,995,072
Non-trainable params: 4,608
__________________________________________________________________________________________________

Training Hyperparameters

In [ ]:
EPOCHS = 250

STEPS_PER_EPOCH = train_size // BATCH_SIZE
VALIDATION_STEPS = val_size // BATCH_SIZE

Start Training

In [ ]:
# sometimes it can be very interesting to run some batches on cpu
# because the tracing is way better than on GPU
# you will have more obvious error message
# but in our case, it takes A LOT of time

# #On CPU
# with tf.device("/cpu:0"):
#     model_history = model.fit(dataset['train'], epochs=EPOCHS,
#                               callbacks=[saver],
#                               steps_per_epoch=STEPS_PER_EPOCH,
#                               validation_steps=VALIDATION_STEPS,
#                               validation_data=dataset['val'])

  #On GPU
model_history = model.fit(dataset['train'], epochs=EPOCHS,
                          callbacks=[saver],
                          steps_per_epoch=STEPS_PER_EPOCH,
                          validation_steps=VALIDATION_STEPS,
                          validation_data=dataset['val'])
Epoch 1/250
 6/14 [===========>..................] - ETA: 8s - loss: 12.8110 - accuracy: 0.0077WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.4401s vs `on_train_batch_end` time: 0.4837s). Check your callbacks.
14/14 [==============================] - 59s 1s/step - loss: 10.6289 - accuracy: 0.0618 - val_loss: 3.2582 - val_accuracy: 0.4930
Epoch 2/250
14/14 [==============================] - 15s 1s/step - loss: 2.6239 - accuracy: 0.5868 - val_loss: 0.6761 - val_accuracy: 0.7827
Epoch 3/250
14/14 [==============================] - 15s 1s/step - loss: 0.8535 - accuracy: 0.7397 - val_loss: 0.5839 - val_accuracy: 0.9202
Epoch 4/250
14/14 [==============================] - 15s 1s/step - loss: 0.6711 - accuracy: 0.8289 - val_loss: 0.4859 - val_accuracy: 0.9523
Epoch 5/250
14/14 [==============================] - 16s 1s/step - loss: 0.5682 - accuracy: 0.9144 - val_loss: 0.5208 - val_accuracy: 0.9597
Epoch 6/250
14/14 [==============================] - 16s 1s/step - loss: 0.4647 - accuracy: 0.9614 - val_loss: 0.6457 - val_accuracy: 0.9597
Epoch 7/250
14/14 [==============================] - 16s 1s/step - loss: 0.4347 - accuracy: 0.9681 - val_loss: 0.6299 - val_accuracy: 0.9597
Epoch 8/250
14/14 [==============================] - 16s 1s/step - loss: 0.4150 - accuracy: 0.9565 - val_loss: 0.6145 - val_accuracy: 0.9590
Epoch 9/250
14/14 [==============================] - 16s 1s/step - loss: 0.3693 - accuracy: 0.9467 - val_loss: 0.6107 - val_accuracy: 0.9591
Epoch 10/250
14/14 [==============================] - 16s 1s/step - loss: 0.3628 - accuracy: 0.9468 - val_loss: 0.5685 - val_accuracy: 0.9595
Epoch 11/250
14/14 [==============================] - 16s 1s/step - loss: 0.3995 - accuracy: 0.9336 - val_loss: 0.5994 - val_accuracy: 0.9580
Epoch 12/250
14/14 [==============================] - 16s 1s/step - loss: 0.3871 - accuracy: 0.9319 - val_loss: 0.5739 - val_accuracy: 0.9585
Epoch 13/250
14/14 [==============================] - 16s 1s/step - loss: 0.3270 - accuracy: 0.9302 - val_loss: 0.5633 - val_accuracy: 0.9578
Epoch 14/250
14/14 [==============================] - 16s 1s/step - loss: 0.3112 - accuracy: 0.9305 - val_loss: 0.5551 - val_accuracy: 0.9579
Epoch 15/250
14/14 [==============================] - 16s 1s/step - loss: 0.2910 - accuracy: 0.9340 - val_loss: 0.5567 - val_accuracy: 0.9583
Epoch 16/250
14/14 [==============================] - 16s 1s/step - loss: 0.2688 - accuracy: 0.9391 - val_loss: 0.5609 - val_accuracy: 0.9586
Epoch 17/250
14/14 [==============================] - 16s 1s/step - loss: 0.2472 - accuracy: 0.9432 - val_loss: 0.5444 - val_accuracy: 0.9591
Epoch 18/250
14/14 [==============================] - 16s 1s/step - loss: 0.2382 - accuracy: 0.9434 - val_loss: 0.5060 - val_accuracy: 0.9595
Epoch 19/250
14/14 [==============================] - 16s 1s/step - loss: 0.2240 - accuracy: 0.9435 - val_loss: 0.5104 - val_accuracy: 0.9596
Epoch 20/250
14/14 [==============================] - 16s 1s/step - loss: 0.2080 - accuracy: 0.9463 - val_loss: 0.5169 - val_accuracy: 0.9596
Epoch 21/250
14/14 [==============================] - 16s 1s/step - loss: 0.2166 - accuracy: 0.9437 - val_loss: 0.5191 - val_accuracy: 0.9596
Epoch 22/250
14/14 [==============================] - 16s 1s/step - loss: 0.2846 - accuracy: 0.9373 - val_loss: 0.4731 - val_accuracy: 0.9589
Epoch 23/250
14/14 [==============================] - 16s 1s/step - loss: 0.2481 - accuracy: 0.9400 - val_loss: 0.4592 - val_accuracy: 0.9588
Epoch 24/250
14/14 [==============================] - 16s 1s/step - loss: 0.2278 - accuracy: 0.9449 - val_loss: 0.4498 - val_accuracy: 0.9587
Epoch 25/250
14/14 [==============================] - 16s 1s/step - loss: 0.2164 - accuracy: 0.9443 - val_loss: 0.4338 - val_accuracy: 0.9587
Epoch 26/250
14/14 [==============================] - 16s 1s/step - loss: 0.2126 - accuracy: 0.9432 - val_loss: 0.4154 - val_accuracy: 0.9586
Epoch 27/250
14/14 [==============================] - 16s 1s/step - loss: 0.1961 - accuracy: 0.9470 - val_loss: 0.4034 - val_accuracy: 0.9585
Epoch 28/250
14/14 [==============================] - 16s 1s/step - loss: 0.1831 - accuracy: 0.9502 - val_loss: 0.4020 - val_accuracy: 0.9584
Epoch 29/250
14/14 [==============================] - 16s 1s/step - loss: 0.1919 - accuracy: 0.9507 - val_loss: 0.3913 - val_accuracy: 0.9582
Epoch 30/250
14/14 [==============================] - 16s 1s/step - loss: 0.1940 - accuracy: 0.9507 - val_loss: 0.2670 - val_accuracy: 0.9566
Epoch 31/250
14/14 [==============================] - 16s 1s/step - loss: 0.2075 - accuracy: 0.9393 - val_loss: 0.2312 - val_accuracy: 0.9495
Epoch 32/250
14/14 [==============================] - 16s 1s/step - loss: 0.2100 - accuracy: 0.9345 - val_loss: 0.2228 - val_accuracy: 0.9490
Epoch 33/250
14/14 [==============================] - 16s 1s/step - loss: 0.1947 - accuracy: 0.9394 - val_loss: 0.2146 - val_accuracy: 0.9497
Epoch 34/250
14/14 [==============================] - 16s 1s/step - loss: 0.1785 - accuracy: 0.9440 - val_loss: 0.2135 - val_accuracy: 0.9508
Epoch 35/250
14/14 [==============================] - 16s 1s/step - loss: 0.1725 - accuracy: 0.9474 - val_loss: 0.2137 - val_accuracy: 0.9520
Epoch 36/250
14/14 [==============================] - 16s 1s/step - loss: 0.1703 - accuracy: 0.9497 - val_loss: 0.2104 - val_accuracy: 0.9534
Epoch 37/250
14/14 [==============================] - 16s 1s/step - loss: 0.1630 - accuracy: 0.9526 - val_loss: 0.2145 - val_accuracy: 0.9544
Epoch 38/250
14/14 [==============================] - 16s 1s/step - loss: 0.1527 - accuracy: 0.9575 - val_loss: 0.2264 - val_accuracy: 0.9555
Epoch 39/250
14/14 [==============================] - 16s 1s/step - loss: 0.1462 - accuracy: 0.9589 - val_loss: 0.2384 - val_accuracy: 0.9562
Epoch 40/250
14/14 [==============================] - 16s 1s/step - loss: 0.1880 - accuracy: 0.9525 - val_loss: 0.2455 - val_accuracy: 0.9566
Epoch 41/250
14/14 [==============================] - 16s 1s/step - loss: 0.2151 - accuracy: 0.9494 - val_loss: 0.2768 - val_accuracy: 0.9578
Epoch 42/250
14/14 [==============================] - 16s 1s/step - loss: 0.2017 - accuracy: 0.9535 - val_loss: 0.2450 - val_accuracy: 0.9575
Epoch 43/250
14/14 [==============================] - 16s 1s/step - loss: 0.1863 - accuracy: 0.9531 - val_loss: 0.2355 - val_accuracy: 0.9575
Epoch 44/250
14/14 [==============================] - 16s 1s/step - loss: 0.1701 - accuracy: 0.9558 - val_loss: 0.2243 - val_accuracy: 0.9575
Epoch 45/250
14/14 [==============================] - 16s 1s/step - loss: 0.1579 - accuracy: 0.9577 - val_loss: 0.2245 - val_accuracy: 0.9578
Epoch 46/250
14/14 [==============================] - 16s 1s/step - loss: 0.1502 - accuracy: 0.9623 - val_loss: 0.2325 - val_accuracy: 0.9582
Epoch 47/250
14/14 [==============================] - 16s 1s/step - loss: 0.1516 - accuracy: 0.9637 - val_loss: 0.2339 - val_accuracy: 0.9586
Epoch 48/250
14/14 [==============================] - 16s 1s/step - loss: 0.1580 - accuracy: 0.9617 - val_loss: 0.2389 - val_accuracy: 0.9592
Epoch 49/250
14/14 [==============================] - 16s 1s/step - loss: 0.1669 - accuracy: 0.9609 - val_loss: 0.2257 - val_accuracy: 0.9594
Epoch 50/250
14/14 [==============================] - 16s 1s/step - loss: 0.1540 - accuracy: 0.9639 - val_loss: 0.2082 - val_accuracy: 0.9594
Epoch 51/250
14/14 [==============================] - 16s 1s/step - loss: 0.1485 - accuracy: 0.9645 - val_loss: 0.1950 - val_accuracy: 0.9596
Epoch 52/250
14/14 [==============================] - 16s 1s/step - loss: 0.1472 - accuracy: 0.9627 - val_loss: 0.1878 - val_accuracy: 0.9576
Epoch 53/250
14/14 [==============================] - 16s 1s/step - loss: 0.1469 - accuracy: 0.9595 - val_loss: 0.1853 - val_accuracy: 0.9567
Epoch 54/250
14/14 [==============================] - 16s 1s/step - loss: 0.1429 - accuracy: 0.9597 - val_loss: 0.1872 - val_accuracy: 0.9573
Epoch 55/250
14/14 [==============================] - 16s 1s/step - loss: 0.1444 - accuracy: 0.9604 - val_loss: 0.1711 - val_accuracy: 0.9568
Epoch 56/250
14/14 [==============================] - 16s 1s/step - loss: 0.1360 - accuracy: 0.9608 - val_loss: 0.1766 - val_accuracy: 0.9561
Epoch 57/250
14/14 [==============================] - 16s 1s/step - loss: 0.1280 - accuracy: 0.9648 - val_loss: 0.1879 - val_accuracy: 0.9569
Epoch 58/250
14/14 [==============================] - 16s 1s/step - loss: 0.1375 - accuracy: 0.9612 - val_loss: 0.1995 - val_accuracy: 0.9580
Epoch 59/250
14/14 [==============================] - 16s 1s/step - loss: 0.2023 - accuracy: 0.9534 - val_loss: 0.2020 - val_accuracy: 0.9586
Epoch 60/250
14/14 [==============================] - 16s 1s/step - loss: 0.1812 - accuracy: 0.9562 - val_loss: 0.2083 - val_accuracy: 0.9593
Epoch 61/250
14/14 [==============================] - 16s 1s/step - loss: 0.1617 - accuracy: 0.9619 - val_loss: 0.2136 - val_accuracy: 0.9595
Epoch 62/250
14/14 [==============================] - 16s 1s/step - loss: 0.1571 - accuracy: 0.9631 - val_loss: 0.1745 - val_accuracy: 0.9592
Epoch 63/250
14/14 [==============================] - 16s 1s/step - loss: 0.1483 - accuracy: 0.9633 - val_loss: 0.1718 - val_accuracy: 0.9592
Epoch 64/250
14/14 [==============================] - 16s 1s/step - loss: 0.1362 - accuracy: 0.9668 - val_loss: 0.1876 - val_accuracy: 0.9596
Epoch 65/250
14/14 [==============================] - 16s 1s/step - loss: 0.1258 - accuracy: 0.9690 - val_loss: 0.1770 - val_accuracy: 0.9595
Epoch 66/250
14/14 [==============================] - 16s 1s/step - loss: 0.1338 - accuracy: 0.9622 - val_loss: 0.1544 - val_accuracy: 0.9538
Epoch 67/250
14/14 [==============================] - 16s 1s/step - loss: 0.1446 - accuracy: 0.9547 - val_loss: 0.1515 - val_accuracy: 0.9488
Epoch 68/250
14/14 [==============================] - 16s 1s/step - loss: 0.1536 - accuracy: 0.9445 - val_loss: 0.1551 - val_accuracy: 0.9457
Epoch 69/250
14/14 [==============================] - 16s 1s/step - loss: 0.1529 - accuracy: 0.9421 - val_loss: 0.1583 - val_accuracy: 0.9485
Epoch 70/250
14/14 [==============================] - 16s 1s/step - loss: 0.1401 - accuracy: 0.9490 - val_loss: 0.1669 - val_accuracy: 0.9513
Epoch 71/250
14/14 [==============================] - 16s 1s/step - loss: 0.1310 - accuracy: 0.9546 - val_loss: 0.1774 - val_accuracy: 0.9537
Epoch 72/250
14/14 [==============================] - 16s 1s/step - loss: 0.1254 - accuracy: 0.9594 - val_loss: 0.1813 - val_accuracy: 0.9555
Epoch 73/250
14/14 [==============================] - 16s 1s/step - loss: 0.1244 - accuracy: 0.9616 - val_loss: 0.1700 - val_accuracy: 0.9551
Epoch 74/250
14/14 [==============================] - 16s 1s/step - loss: 0.1250 - accuracy: 0.9555 - val_loss: 0.1813 - val_accuracy: 0.9487
Epoch 75/250
14/14 [==============================] - 16s 1s/step - loss: 0.1263 - accuracy: 0.9484 - val_loss: 0.2000 - val_accuracy: 0.9495
Epoch 76/250
14/14 [==============================] - 16s 1s/step - loss: 0.1291 - accuracy: 0.9490 - val_loss: 0.2086 - val_accuracy: 0.9510
Epoch 77/250
14/14 [==============================] - 16s 1s/step - loss: 0.1634 - accuracy: 0.9456 - val_loss: 0.2010 - val_accuracy: 0.9522
Epoch 78/250
14/14 [==============================] - 16s 1s/step - loss: 0.1783 - accuracy: 0.9448 - val_loss: 0.1749 - val_accuracy: 0.9518
Epoch 79/250
14/14 [==============================] - 16s 1s/step - loss: 0.1570 - accuracy: 0.9456 - val_loss: 0.1584 - val_accuracy: 0.9515
Epoch 80/250
14/14 [==============================] - 16s 1s/step - loss: 0.1466 - accuracy: 0.9482 - val_loss: 0.1602 - val_accuracy: 0.9539
Epoch 81/250
14/14 [==============================] - 16s 1s/step - loss: 0.1324 - accuracy: 0.9546 - val_loss: 0.1670 - val_accuracy: 0.9562
Epoch 82/250
14/14 [==============================] - 16s 1s/step - loss: 0.1239 - accuracy: 0.9588 - val_loss: 0.1759 - val_accuracy: 0.9578
Epoch 83/250
14/14 [==============================] - 16s 1s/step - loss: 0.1170 - accuracy: 0.9637 - val_loss: 0.1744 - val_accuracy: 0.9589
Epoch 84/250
14/14 [==============================] - 16s 1s/step - loss: 0.1181 - accuracy: 0.9639 - val_loss: 0.1733 - val_accuracy: 0.9600
Epoch 85/250
14/14 [==============================] - 16s 1s/step - loss: 0.1262 - accuracy: 0.9627 - val_loss: 0.1821 - val_accuracy: 0.9609
Epoch 86/250
14/14 [==============================] - 16s 1s/step - loss: 0.1369 - accuracy: 0.9614 - val_loss: 0.1394 - val_accuracy: 0.9535
Epoch 87/250
14/14 [==============================] - 16s 1s/step - loss: 0.1333 - accuracy: 0.9529 - val_loss: 0.1408 - val_accuracy: 0.9491
Epoch 88/250
14/14 [==============================] - 16s 1s/step - loss: 0.1325 - accuracy: 0.9519 - val_loss: 0.1413 - val_accuracy: 0.9520
Epoch 89/250
14/14 [==============================] - 16s 1s/step - loss: 0.1254 - accuracy: 0.9577 - val_loss: 0.1444 - val_accuracy: 0.9547
Epoch 90/250
14/14 [==============================] - 16s 1s/step - loss: 0.1209 - accuracy: 0.9595 - val_loss: 0.1484 - val_accuracy: 0.9561
Epoch 91/250
14/14 [==============================] - 16s 1s/step - loss: 0.1141 - accuracy: 0.9613 - val_loss: 0.1555 - val_accuracy: 0.9573
Epoch 92/250
14/14 [==============================] - 16s 1s/step - loss: 0.1217 - accuracy: 0.9622 - val_loss: 0.1688 - val_accuracy: 0.9576
Epoch 93/250
14/14 [==============================] - 16s 1s/step - loss: 0.1104 - accuracy: 0.9662 - val_loss: 0.1855 - val_accuracy: 0.9562
Epoch 94/250
14/14 [==============================] - 16s 1s/step - loss: 0.1024 - accuracy: 0.9684 - val_loss: 0.1684 - val_accuracy: 0.9561
Epoch 95/250
14/14 [==============================] - 16s 1s/step - loss: 0.1210 - accuracy: 0.9617 - val_loss: 0.1630 - val_accuracy: 0.9566
Epoch 96/250
14/14 [==============================] - 16s 1s/step - loss: 0.1867 - accuracy: 0.9530 - val_loss: 0.1598 - val_accuracy: 0.9571
Epoch 97/250
14/14 [==============================] - 16s 1s/step - loss: 0.1650 - accuracy: 0.9556 - val_loss: 0.1589 - val_accuracy: 0.9576
Epoch 98/250
14/14 [==============================] - 16s 1s/step - loss: 0.1395 - accuracy: 0.9582 - val_loss: 0.1560 - val_accuracy: 0.9567
Epoch 99/250
14/14 [==============================] - 16s 1s/step - loss: 0.1343 - accuracy: 0.9577 - val_loss: 0.1628 - val_accuracy: 0.9579
Epoch 100/250
14/14 [==============================] - 16s 1s/step - loss: 0.1316 - accuracy: 0.9612 - val_loss: 0.1716 - val_accuracy: 0.9590
Epoch 101/250
14/14 [==============================] - 16s 1s/step - loss: 0.1204 - accuracy: 0.9658 - val_loss: 0.1649 - val_accuracy: 0.9597
Epoch 102/250
14/14 [==============================] - 16s 1s/step - loss: 0.1102 - accuracy: 0.9676 - val_loss: 0.1657 - val_accuracy: 0.9603
Epoch 103/250
14/14 [==============================] - 16s 1s/step - loss: 0.1177 - accuracy: 0.9654 - val_loss: 0.1565 - val_accuracy: 0.9606
Epoch 104/250
14/14 [==============================] - 16s 1s/step - loss: 0.1206 - accuracy: 0.9640 - val_loss: 0.1595 - val_accuracy: 0.9609
Epoch 105/250
14/14 [==============================] - 16s 1s/step - loss: 0.1245 - accuracy: 0.9642 - val_loss: 0.1649 - val_accuracy: 0.9614
Epoch 106/250
14/14 [==============================] - 16s 1s/step - loss: 0.1228 - accuracy: 0.9659 - val_loss: 0.1648 - val_accuracy: 0.9619
Epoch 107/250
14/14 [==============================] - 16s 1s/step - loss: 0.1135 - accuracy: 0.9682 - val_loss: 0.1592 - val_accuracy: 0.9620
Epoch 108/250
14/14 [==============================] - 16s 1s/step - loss: 0.1073 - accuracy: 0.9694 - val_loss: 0.1494 - val_accuracy: 0.9622
Epoch 109/250
14/14 [==============================] - 16s 1s/step - loss: 0.0999 - accuracy: 0.9713 - val_loss: 0.1411 - val_accuracy: 0.9618
Epoch 110/250
14/14 [==============================] - 16s 1s/step - loss: 0.1008 - accuracy: 0.9711 - val_loss: 0.1416 - val_accuracy: 0.9616
Epoch 111/250
14/14 [==============================] - 16s 1s/step - loss: 0.0985 - accuracy: 0.9723 - val_loss: 0.1482 - val_accuracy: 0.9622
Epoch 112/250
14/14 [==============================] - 16s 1s/step - loss: 0.0900 - accuracy: 0.9757 - val_loss: 0.1498 - val_accuracy: 0.9623
Epoch 113/250
14/14 [==============================] - 16s 1s/step - loss: 0.0920 - accuracy: 0.9715 - val_loss: 0.1400 - val_accuracy: 0.9575
Epoch 114/250
14/14 [==============================] - 16s 1s/step - loss: 0.1338 - accuracy: 0.9624 - val_loss: 0.1432 - val_accuracy: 0.9583
Epoch 115/250
14/14 [==============================] - 16s 1s/step - loss: 0.1586 - accuracy: 0.9601 - val_loss: 0.1527 - val_accuracy: 0.9604
Epoch 116/250
14/14 [==============================] - 16s 1s/step - loss: 0.1488 - accuracy: 0.9640 - val_loss: 0.1474 - val_accuracy: 0.9612
Epoch 117/250
14/14 [==============================] - 16s 1s/step - loss: 0.1328 - accuracy: 0.9657 - val_loss: 0.1283 - val_accuracy: 0.9600
Epoch 118/250
14/14 [==============================] - 16s 1s/step - loss: 0.1152 - accuracy: 0.9676 - val_loss: 0.1263 - val_accuracy: 0.9588
Epoch 119/250
14/14 [==============================] - 16s 1s/step - loss: 0.1075 - accuracy: 0.9679 - val_loss: 0.1275 - val_accuracy: 0.9574
Epoch 120/250
14/14 [==============================] - 16s 1s/step - loss: 0.1029 - accuracy: 0.9706 - val_loss: 0.1343 - val_accuracy: 0.9612
Epoch 121/250
14/14 [==============================] - 16s 1s/step - loss: 0.1047 - accuracy: 0.9726 - val_loss: 0.1519 - val_accuracy: 0.9625
Epoch 122/250
14/14 [==============================] - 16s 1s/step - loss: 0.1124 - accuracy: 0.9725 - val_loss: 0.1470 - val_accuracy: 0.9626
Epoch 123/250
14/14 [==============================] - 16s 1s/step - loss: 0.1211 - accuracy: 0.9707 - val_loss: 0.1343 - val_accuracy: 0.9623
Epoch 124/250
14/14 [==============================] - 16s 1s/step - loss: 0.1072 - accuracy: 0.9727 - val_loss: 0.1330 - val_accuracy: 0.9614
Epoch 125/250
14/14 [==============================] - 16s 1s/step - loss: 0.1018 - accuracy: 0.9722 - val_loss: 0.1396 - val_accuracy: 0.9604
Epoch 126/250
14/14 [==============================] - 16s 1s/step - loss: 0.0976 - accuracy: 0.9728 - val_loss: 0.1389 - val_accuracy: 0.9597
Epoch 127/250
14/14 [==============================] - 16s 1s/step - loss: 0.0910 - accuracy: 0.9749 - val_loss: 0.1283 - val_accuracy: 0.9526
Epoch 128/250
14/14 [==============================] - 16s 1s/step - loss: 0.0906 - accuracy: 0.9738 - val_loss: 0.1293 - val_accuracy: 0.9541
Epoch 129/250
14/14 [==============================] - 16s 1s/step - loss: 0.0925 - accuracy: 0.9764 - val_loss: 0.1427 - val_accuracy: 0.9599
Epoch 130/250
14/14 [==============================] - 16s 1s/step - loss: 0.0807 - accuracy: 0.9798 - val_loss: 0.1581 - val_accuracy: 0.9613
Epoch 131/250
14/14 [==============================] - 16s 1s/step - loss: 0.0713 - accuracy: 0.9812 - val_loss: 0.1373 - val_accuracy: 0.9582
Epoch 132/250
14/14 [==============================] - 16s 1s/step - loss: 0.0914 - accuracy: 0.9744 - val_loss: 0.1378 - val_accuracy: 0.9551
Epoch 133/250
14/14 [==============================] - 16s 1s/step - loss: 0.1459 - accuracy: 0.9662 - val_loss: 0.1497 - val_accuracy: 0.9562
Epoch 134/250
14/14 [==============================] - 16s 1s/step - loss: 0.1291 - accuracy: 0.9684 - val_loss: 0.1546 - val_accuracy: 0.9587
Epoch 135/250
14/14 [==============================] - 16s 1s/step - loss: 0.1164 - accuracy: 0.9711 - val_loss: 0.1519 - val_accuracy: 0.9598
Epoch 136/250
14/14 [==============================] - 16s 1s/step - loss: 0.1186 - accuracy: 0.9711 - val_loss: 0.1416 - val_accuracy: 0.9589
Epoch 137/250
14/14 [==============================] - 16s 1s/step - loss: 0.1100 - accuracy: 0.9720 - val_loss: 0.1336 - val_accuracy: 0.9580
Epoch 138/250
14/14 [==============================] - 16s 1s/step - loss: 0.0958 - accuracy: 0.9748 - val_loss: 0.1321 - val_accuracy: 0.9530
Epoch 139/250
14/14 [==============================] - 16s 1s/step - loss: 0.0893 - accuracy: 0.9750 - val_loss: 0.1367 - val_accuracy: 0.9566
Epoch 140/250
14/14 [==============================] - 16s 1s/step - loss: 0.0958 - accuracy: 0.9745 - val_loss: 0.1273 - val_accuracy: 0.9496
Epoch 141/250
14/14 [==============================] - 16s 1s/step - loss: 0.0999 - accuracy: 0.9733 - val_loss: 0.1231 - val_accuracy: 0.9551
Epoch 142/250
14/14 [==============================] - 16s 1s/step - loss: 0.1075 - accuracy: 0.9732 - val_loss: 0.1224 - val_accuracy: 0.9559
Epoch 143/250
14/14 [==============================] - 16s 1s/step - loss: 0.0965 - accuracy: 0.9737 - val_loss: 0.1211 - val_accuracy: 0.9541
Epoch 144/250
14/14 [==============================] - 16s 1s/step - loss: 0.0880 - accuracy: 0.9744 - val_loss: 0.1232 - val_accuracy: 0.9541
Epoch 145/250
14/14 [==============================] - 16s 1s/step - loss: 0.0836 - accuracy: 0.9761 - val_loss: 0.1287 - val_accuracy: 0.9543
Epoch 146/250
14/14 [==============================] - 16s 1s/step - loss: 0.0781 - accuracy: 0.9772 - val_loss: 0.1356 - val_accuracy: 0.9540
Epoch 147/250
14/14 [==============================] - 16s 1s/step - loss: 0.0762 - accuracy: 0.9780 - val_loss: 0.1334 - val_accuracy: 0.9546
Epoch 148/250
14/14 [==============================] - 16s 1s/step - loss: 0.0726 - accuracy: 0.9792 - val_loss: 0.1396 - val_accuracy: 0.9360
Epoch 149/250
14/14 [==============================] - 16s 1s/step - loss: 0.0847 - accuracy: 0.9701 - val_loss: 0.1575 - val_accuracy: 0.9364
Epoch 150/250
14/14 [==============================] - 16s 1s/step - loss: 0.0860 - accuracy: 0.9695 - val_loss: 0.1683 - val_accuracy: 0.9431
Epoch 151/250
14/14 [==============================] - 16s 1s/step - loss: 0.1178 - accuracy: 0.9669 - val_loss: 0.1649 - val_accuracy: 0.9490
Epoch 152/250
14/14 [==============================] - 16s 1s/step - loss: 0.1397 - accuracy: 0.9657 - val_loss: 0.1520 - val_accuracy: 0.9528
Epoch 153/250
14/14 [==============================] - 16s 1s/step - loss: 0.1281 - accuracy: 0.9693 - val_loss: 0.1363 - val_accuracy: 0.9489
Epoch 154/250
14/14 [==============================] - 16s 1s/step - loss: 0.1120 - accuracy: 0.9710 - val_loss: 0.1381 - val_accuracy: 0.9522
Epoch 155/250
14/14 [==============================] - 16s 1s/step - loss: 0.1009 - accuracy: 0.9730 - val_loss: 0.1402 - val_accuracy: 0.9535
Epoch 156/250
14/14 [==============================] - 16s 1s/step - loss: 0.0949 - accuracy: 0.9737 - val_loss: 0.1308 - val_accuracy: 0.9536
Epoch 157/250
14/14 [==============================] - 16s 1s/step - loss: 0.0871 - accuracy: 0.9766 - val_loss: 0.1243 - val_accuracy: 0.9521
Epoch 158/250
14/14 [==============================] - 16s 1s/step - loss: 0.0881 - accuracy: 0.9759 - val_loss: 0.1392 - val_accuracy: 0.9480
Epoch 159/250
14/14 [==============================] - 16s 1s/step - loss: 0.0903 - accuracy: 0.9722 - val_loss: 0.1430 - val_accuracy: 0.9484
Epoch 160/250
14/14 [==============================] - 16s 1s/step - loss: 0.1013 - accuracy: 0.9707 - val_loss: 0.1291 - val_accuracy: 0.9477
Epoch 161/250
14/14 [==============================] - 16s 1s/step - loss: 0.0911 - accuracy: 0.9733 - val_loss: 0.1256 - val_accuracy: 0.9457
Epoch 162/250
14/14 [==============================] - 16s 1s/step - loss: 0.0880 - accuracy: 0.9743 - val_loss: 0.1232 - val_accuracy: 0.9500
Epoch 163/250
14/14 [==============================] - 16s 1s/step - loss: 0.0852 - accuracy: 0.9755 - val_loss: 0.1298 - val_accuracy: 0.9425
Epoch 164/250
14/14 [==============================] - 16s 1s/step - loss: 0.0793 - accuracy: 0.9766 - val_loss: 0.1415 - val_accuracy: 0.9514
Epoch 165/250
14/14 [==============================] - 16s 1s/step - loss: 0.0766 - accuracy: 0.9786 - val_loss: 0.1399 - val_accuracy: 0.9522
Epoch 166/250
14/14 [==============================] - 16s 1s/step - loss: 0.0745 - accuracy: 0.9790 - val_loss: 0.1257 - val_accuracy: 0.9474
Epoch 167/250
14/14 [==============================] - 16s 1s/step - loss: 0.0659 - accuracy: 0.9811 - val_loss: 0.1254 - val_accuracy: 0.9494
Epoch 168/250
14/14 [==============================] - 16s 1s/step - loss: 0.0610 - accuracy: 0.9824 - val_loss: 0.1272 - val_accuracy: 0.9487
Epoch 169/250
14/14 [==============================] - 16s 1s/step - loss: 0.0812 - accuracy: 0.9757 - val_loss: 0.1289 - val_accuracy: 0.9456
Epoch 170/250
14/14 [==============================] - 16s 1s/step - loss: 0.1266 - accuracy: 0.9689 - val_loss: 0.1244 - val_accuracy: 0.9496
Epoch 171/250
14/14 [==============================] - 16s 1s/step - loss: 0.1433 - accuracy: 0.9530 - val_loss: 0.2538 - val_accuracy: 0.9040
Epoch 172/250
14/14 [==============================] - 16s 1s/step - loss: 0.1952 - accuracy: 0.9078 - val_loss: 0.2082 - val_accuracy: 0.8780
Epoch 173/250
14/14 [==============================] - 16s 1s/step - loss: 0.1810 - accuracy: 0.9083 - val_loss: 0.1967 - val_accuracy: 0.8746
Epoch 174/250
14/14 [==============================] - 16s 1s/step - loss: 0.1666 - accuracy: 0.9176 - val_loss: 0.1856 - val_accuracy: 0.8833
Epoch 175/250
14/14 [==============================] - 16s 1s/step - loss: 0.1455 - accuracy: 0.9307 - val_loss: 0.1727 - val_accuracy: 0.9006
Epoch 176/250
14/14 [==============================] - 16s 1s/step - loss: 0.1298 - accuracy: 0.9402 - val_loss: 0.1621 - val_accuracy: 0.9138
Epoch 177/250
14/14 [==============================] - 16s 1s/step - loss: 0.1279 - accuracy: 0.9464 - val_loss: 0.1544 - val_accuracy: 0.9215
Epoch 178/250
14/14 [==============================] - 16s 1s/step - loss: 0.1244 - accuracy: 0.9519 - val_loss: 0.1568 - val_accuracy: 0.9144
Epoch 179/250
14/14 [==============================] - 16s 1s/step - loss: 0.1181 - accuracy: 0.9528 - val_loss: 0.1520 - val_accuracy: 0.9209
Epoch 180/250
14/14 [==============================] - 16s 1s/step - loss: 0.1127 - accuracy: 0.9588 - val_loss: 0.1488 - val_accuracy: 0.9248
Epoch 181/250
14/14 [==============================] - 16s 1s/step - loss: 0.1020 - accuracy: 0.9619 - val_loss: 0.1528 - val_accuracy: 0.9178
Epoch 182/250
14/14 [==============================] - 16s 1s/step - loss: 0.0966 - accuracy: 0.9651 - val_loss: 0.1401 - val_accuracy: 0.9337
Epoch 183/250
14/14 [==============================] - 16s 1s/step - loss: 0.0905 - accuracy: 0.9704 - val_loss: 0.1366 - val_accuracy: 0.9373
Epoch 184/250
14/14 [==============================] - 16s 1s/step - loss: 0.0853 - accuracy: 0.9730 - val_loss: 0.1375 - val_accuracy: 0.9344
Epoch 185/250
14/14 [==============================] - 16s 1s/step - loss: 0.0820 - accuracy: 0.9740 - val_loss: 0.1420 - val_accuracy: 0.9279
Epoch 186/250
14/14 [==============================] - 16s 1s/step - loss: 0.0761 - accuracy: 0.9757 - val_loss: 0.1306 - val_accuracy: 0.9414
Epoch 187/250
14/14 [==============================] - 16s 1s/step - loss: 0.0768 - accuracy: 0.9765 - val_loss: 0.1281 - val_accuracy: 0.9442
Epoch 188/250
14/14 [==============================] - 16s 1s/step - loss: 0.1237 - accuracy: 0.9693 - val_loss: 0.1293 - val_accuracy: 0.9416
Epoch 189/250
14/14 [==============================] - 16s 1s/step - loss: 0.1412 - accuracy: 0.9666 - val_loss: 0.1311 - val_accuracy: 0.9381
Epoch 190/250
14/14 [==============================] - 16s 1s/step - loss: 0.1224 - accuracy: 0.9694 - val_loss: 0.1281 - val_accuracy: 0.9415
Epoch 191/250
14/14 [==============================] - 16s 1s/step - loss: 0.1148 - accuracy: 0.9708 - val_loss: 0.1303 - val_accuracy: 0.9378
Epoch 192/250
14/14 [==============================] - 16s 1s/step - loss: 0.0980 - accuracy: 0.9725 - val_loss: 0.1243 - val_accuracy: 0.9490
Epoch 193/250
14/14 [==============================] - 16s 1s/step - loss: 0.0967 - accuracy: 0.9739 - val_loss: 0.1226 - val_accuracy: 0.9559
Epoch 194/250
14/14 [==============================] - 16s 1s/step - loss: 0.0897 - accuracy: 0.9751 - val_loss: 0.1349 - val_accuracy: 0.9500
Epoch 195/250
14/14 [==============================] - 16s 1s/step - loss: 0.0914 - accuracy: 0.9735 - val_loss: 0.1385 - val_accuracy: 0.9480
Epoch 196/250
14/14 [==============================] - 16s 1s/step - loss: 0.0964 - accuracy: 0.9713 - val_loss: 0.2781 - val_accuracy: 0.8271
Epoch 197/250
14/14 [==============================] - 16s 1s/step - loss: 0.2585 - accuracy: 0.8700 - val_loss: 0.4179 - val_accuracy: 0.6453
Epoch 198/250
14/14 [==============================] - 16s 1s/step - loss: 0.2811 - accuracy: 0.8308 - val_loss: 0.3871 - val_accuracy: 0.6312
Epoch 199/250
14/14 [==============================] - 16s 1s/step - loss: 0.2519 - accuracy: 0.8282 - val_loss: 0.3385 - val_accuracy: 0.6700
Epoch 200/250
14/14 [==============================] - 16s 1s/step - loss: 0.2254 - accuracy: 0.8408 - val_loss: 0.3056 - val_accuracy: 0.7142
Epoch 201/250
14/14 [==============================] - 16s 1s/step - loss: 0.2061 - accuracy: 0.8575 - val_loss: 0.2811 - val_accuracy: 0.7544
Epoch 202/250
14/14 [==============================] - 16s 1s/step - loss: 0.1901 - accuracy: 0.8695 - val_loss: 0.2599 - val_accuracy: 0.7902
Epoch 203/250
14/14 [==============================] - 16s 1s/step - loss: 0.1817 - accuracy: 0.8821 - val_loss: 0.2395 - val_accuracy: 0.8255
Epoch 204/250
14/14 [==============================] - 16s 1s/step - loss: 0.1689 - accuracy: 0.8931 - val_loss: 0.2239 - val_accuracy: 0.8521
Epoch 205/250
14/14 [==============================] - 16s 1s/step - loss: 0.1614 - accuracy: 0.9027 - val_loss: 0.2117 - val_accuracy: 0.8739
Epoch 206/250
14/14 [==============================] - 16s 1s/step - loss: 0.1611 - accuracy: 0.9115 - val_loss: 0.2007 - val_accuracy: 0.8869
Epoch 207/250
14/14 [==============================] - 16s 1s/step - loss: 0.1953 - accuracy: 0.9102 - val_loss: 0.1927 - val_accuracy: 0.8974
Epoch 208/250
14/14 [==============================] - 16s 1s/step - loss: 0.1783 - accuracy: 0.9159 - val_loss: 0.1874 - val_accuracy: 0.9042
Epoch 209/250
14/14 [==============================] - 16s 1s/step - loss: 0.1645 - accuracy: 0.9215 - val_loss: 0.1835 - val_accuracy: 0.9079
Epoch 210/250
14/14 [==============================] - 16s 1s/step - loss: 0.1588 - accuracy: 0.9242 - val_loss: 0.1791 - val_accuracy: 0.9144
Epoch 211/250
14/14 [==============================] - 16s 1s/step - loss: 0.1547 - accuracy: 0.9290 - val_loss: 0.1735 - val_accuracy: 0.9194
Epoch 212/250
14/14 [==============================] - 16s 1s/step - loss: 0.1431 - accuracy: 0.9340 - val_loss: 0.1710 - val_accuracy: 0.9255
Epoch 213/250
14/14 [==============================] - 16s 1s/step - loss: 0.1331 - accuracy: 0.9376 - val_loss: 0.1705 - val_accuracy: 0.9306
Epoch 214/250
14/14 [==============================] - 16s 1s/step - loss: 0.1378 - accuracy: 0.9395 - val_loss: 0.1715 - val_accuracy: 0.9347
Epoch 215/250
14/14 [==============================] - 16s 1s/step - loss: 0.1411 - accuracy: 0.9418 - val_loss: 0.1598 - val_accuracy: 0.9325
Epoch 216/250
14/14 [==============================] - 16s 1s/step - loss: 0.1415 - accuracy: 0.9408 - val_loss: 0.1559 - val_accuracy: 0.9315
Epoch 217/250
14/14 [==============================] - 16s 1s/step - loss: 0.1395 - accuracy: 0.9406 - val_loss: 0.1549 - val_accuracy: 0.9360
Epoch 218/250
14/14 [==============================] - 16s 1s/step - loss: 0.1266 - accuracy: 0.9468 - val_loss: 0.1550 - val_accuracy: 0.9405
Epoch 219/250
14/14 [==============================] - 16s 1s/step - loss: 0.1177 - accuracy: 0.9515 - val_loss: 0.2042 - val_accuracy: 0.9470
Epoch 220/250
14/14 [==============================] - 16s 1s/step - loss: 0.1134 - accuracy: 0.9546 - val_loss: 0.1807 - val_accuracy: 0.9458
Epoch 221/250
14/14 [==============================] - 16s 1s/step - loss: 0.1099 - accuracy: 0.9566 - val_loss: 0.1766 - val_accuracy: 0.9470
Epoch 222/250
14/14 [==============================] - 16s 1s/step - loss: 0.1060 - accuracy: 0.9608 - val_loss: 0.1926 - val_accuracy: 0.9498
Epoch 223/250
14/14 [==============================] - 16s 1s/step - loss: 0.0958 - accuracy: 0.9653 - val_loss: 0.2044 - val_accuracy: 0.9516
Epoch 224/250
14/14 [==============================] - 16s 1s/step - loss: 0.0965 - accuracy: 0.9659 - val_loss: 0.1823 - val_accuracy: 0.9513
Epoch 225/250
14/14 [==============================] - 16s 1s/step - loss: 0.1422 - accuracy: 0.9595 - val_loss: 0.1773 - val_accuracy: 0.9515
Epoch 226/250
14/14 [==============================] - 16s 1s/step - loss: 0.1636 - accuracy: 0.9565 - val_loss: 0.1662 - val_accuracy: 0.9514
Epoch 227/250
14/14 [==============================] - 16s 1s/step - loss: 0.1438 - accuracy: 0.9602 - val_loss: 0.1608 - val_accuracy: 0.9518
Epoch 228/250
14/14 [==============================] - 16s 1s/step - loss: 0.1324 - accuracy: 0.9613 - val_loss: 0.1628 - val_accuracy: 0.9528
Epoch 229/250
14/14 [==============================] - 16s 1s/step - loss: 0.1178 - accuracy: 0.9646 - val_loss: 0.1671 - val_accuracy: 0.9537
Epoch 230/250
14/14 [==============================] - 16s 1s/step - loss: 0.1113 - accuracy: 0.9657 - val_loss: 0.1516 - val_accuracy: 0.9528
Epoch 231/250
14/14 [==============================] - 16s 1s/step - loss: 0.1022 - accuracy: 0.9682 - val_loss: 0.1348 - val_accuracy: 0.9482
Epoch 232/250
14/14 [==============================] - 16s 1s/step - loss: 0.1009 - accuracy: 0.9672 - val_loss: 0.1417 - val_accuracy: 0.9522
Epoch 233/250
14/14 [==============================] - 16s 1s/step - loss: 0.1028 - accuracy: 0.9679 - val_loss: 0.1423 - val_accuracy: 0.9529
Epoch 234/250
14/14 [==============================] - 16s 1s/step - loss: 0.1111 - accuracy: 0.9663 - val_loss: 0.1469 - val_accuracy: 0.9536
Epoch 235/250
14/14 [==============================] - 16s 1s/step - loss: 0.1020 - accuracy: 0.9689 - val_loss: 0.1464 - val_accuracy: 0.9542
Epoch 236/250
14/14 [==============================] - 16s 1s/step - loss: 0.0970 - accuracy: 0.9695 - val_loss: 0.1600 - val_accuracy: 0.9538
Epoch 237/250
14/14 [==============================] - 16s 1s/step - loss: 0.0949 - accuracy: 0.9695 - val_loss: 0.1624 - val_accuracy: 0.9526
Epoch 238/250
14/14 [==============================] - 16s 1s/step - loss: 0.0886 - accuracy: 0.9710 - val_loss: 0.1381 - val_accuracy: 0.9419
Epoch 239/250
14/14 [==============================] - 16s 1s/step - loss: 0.0868 - accuracy: 0.9687 - val_loss: 0.1369 - val_accuracy: 0.9383
Epoch 240/250
14/14 [==============================] - 16s 1s/step - loss: 0.0884 - accuracy: 0.9706 - val_loss: 0.1493 - val_accuracy: 0.9479
Epoch 241/250
14/14 [==============================] - 16s 1s/step - loss: 0.0756 - accuracy: 0.9744 - val_loss: 0.1569 - val_accuracy: 0.9516
Epoch 242/250
14/14 [==============================] - 16s 1s/step - loss: 0.0696 - accuracy: 0.9775 - val_loss: 0.1522 - val_accuracy: 0.9536
Epoch 243/250
14/14 [==============================] - 16s 1s/step - loss: 0.0964 - accuracy: 0.9714 - val_loss: 0.1386 - val_accuracy: 0.9528
Epoch 244/250
14/14 [==============================] - 16s 1s/step - loss: 0.1570 - accuracy: 0.9639 - val_loss: 0.1411 - val_accuracy: 0.9540
Epoch 245/250
14/14 [==============================] - 16s 1s/step - loss: 0.1371 - accuracy: 0.9665 - val_loss: 0.1423 - val_accuracy: 0.9546
Epoch 246/250
14/14 [==============================] - 16s 1s/step - loss: 0.1201 - accuracy: 0.9696 - val_loss: 0.1370 - val_accuracy: 0.9550
Epoch 247/250
14/14 [==============================] - 16s 1s/step - loss: 0.1205 - accuracy: 0.9695 - val_loss: 0.1413 - val_accuracy: 0.9538
Epoch 248/250
14/14 [==============================] - 16s 1s/step - loss: 0.1116 - accuracy: 0.9683 - val_loss: 0.1420 - val_accuracy: 0.9349
Epoch 249/250
14/14 [==============================] - 16s 1s/step - loss: 0.1022 - accuracy: 0.9645 - val_loss: 0.1398 - val_accuracy: 0.9327
Epoch 250/250
14/14 [==============================] - 16s 1s/step - loss: 0.0973 - accuracy: 0.9630 - val_loss: 0.1797 - val_accuracy: 0.8782

Manually Save Model

In [ ]:
# ## Save the model at the latest EPOCH or as desired
model.save(f"./saved_models/model_{EPOCHS}.h5")

Load

In [ ]:
MODEL_PATH = "./saved_models/model_100.h5" #Change this to the model path you want to load

model.load_weights(MODEL_PATH, by_name=True)

Process Prediction

In [ ]:
import sys
np.set_printoptions(threshold=sys.maxsize)
In [ ]:
#Pick first batch of Image from Validation set
for image, mask in dataset['val'].take(1):
    sample_image, sample_mask = image, mask
In [ ]:
#Pick first Prediction Validation set
sample_idx = 3
pred_mask = model.predict(sample_image)

sample_image = sample_image.numpy()[sample_idx] #Pick first image form a batch of 5
sample_image = (sample_image*255.0).astype(np.uint32)

#Ground Truth Mask
sample_mask = sample_mask.numpy()[sample_idx]
sample_mask = np.squeeze(sample_mask, axis =-1)

#Predicted Mask
pred_mask = pred_mask[sample_idx] #Pick first mask form a batch of 5
In [ ]:
pred_mask_IoU = np.argmax(pred_mask, axis =-1)
#Mean IoU 
m = tf.keras.metrics.MeanIoU(num_classes=3)
m.update_state(sample_mask, pred_mask_IoU)
print("IoU =",m.result().numpy())
IoU = 0.59101623
In [ ]:
print(sample_image.shape)
print(pred_mask.shape)
(256, 256, 3)
(256, 256, 256)
In [ ]:
pred_mask = np.argmax(pred_mask, axis =-1)
#pred_mask = np.expand_dims(pred_mask, axis =-1)
print(sample_image.shape)
print(pred_mask.shape)
(256, 256, 3)
(256, 256)
In [ ]:
#print(pred_mask)
# This is formatted as code

Visualize Prediction

In [ ]:
colors = visualize.random_colors(len(img_meta_obj.annotations))
color = colors[0]
print(color)

masked_image = sample_image.astype(np.uint32).copy()
masked_image = visualize.apply_mask(masked_image, pred_mask, color)
#masked_image = visualize.apply_mask(masked_image, pred_mask)
print(masked_image.shape,pred_mask.shape)
visualize.display_images([masked_image])
(1.0, 0.8571428571428571, 0.0)
(256, 256, 3) (256, 256)
In [ ]:
#print(pred_mask)
print(np.count_nonzero(pred_mask))
2143
In [ ]:
#GT
colors = visualize.random_colors(len(img_meta_obj.annotations))
color = colors[0]
print(color)

masked_image = sample_image.astype(np.uint32).copy()

print(masked_image.shape)
print(sample_mask.shape)

masked_image = visualize.apply_mask(masked_image, sample_mask, color)

visualize.display_images([masked_image])
(0.0, 0.8571428571428568, 1.0)
(256, 256, 3)
(256, 256)
In [ ]:
print(np.count_nonzero(sample_mask))
2832
In [ ]:
print(masked_image.shape, pred_mask.shape)
(256, 256, 3) (256, 256)
In [ ]:
import sys
np.set_printoptions(threshold=sys.maxsize)
sample_idx=0
#Pick first batch of Image from Validation set
for image, mask in dataset['val'].take(5):
    sample_image, sample_mask = image, mask
  #Pick first Prediction Validation set
#for sample_idx in range(1):
    
    pred_mask = model.predict(sample_image)

    sample_image = sample_image.numpy()[sample_idx] #Pick first image form a batch of 5
    sample_image = (sample_image*255.0).astype(np.uint32)

    #Ground Truth Mask
    sample_mask = sample_mask.numpy()[sample_idx]
    sample_mask = np.squeeze(sample_mask, axis =-1)

    #Predicted Mask
    pred_mask = pred_mask[sample_idx] #Pick first mask form a batch of 5
    pred_mask_IoU = np.argmax(pred_mask, axis =-1)
    #Mean IoU 
    m = tf.keras.metrics.MeanIoU(num_classes=3)
    m.update_state(sample_mask, pred_mask_IoU)
    print("IoU =",m.result().numpy())

    #prediction visualization
    colors = visualize.random_colors(len(img_meta_obj.annotations))
    color = colors[0]
    print(color)

    masked_image = sample_image.astype(np.uint32).copy()
    pred_mask = np.argmax(pred_mask, axis =-1)
    print(masked_image.shape)
    print(pred_mask.shape)
    masked_image = visualize.apply_mask(masked_image, pred_mask, color)
    print("predict image")
    visualize.display_images([masked_image])
    #GT
    colors = visualize.random_colors(len(img_meta_obj.annotations))
    color = colors[0]
    print(color)


    masked_image = sample_image.astype(np.uint32).copy()

    print(masked_image.shape)
    print(sample_mask.shape)

    masked_image = visualize.apply_mask(masked_image, sample_mask, color)
    print("Ground Truth Image")
    visualize.display_images([masked_image])
    sample_idx=sample_idx+1
IoU = 0.62726545
(1.0, 0.0, 0.2857142857142865)
(256, 256, 3)
(256, 256)
predict image
(1.0, 0.0, 0.2857142857142865)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.55100787
(0.2857142857142858, 1.0, 0.0)
(256, 256, 3)
(256, 256)
predict image
(1.0, 0.8571428571428571, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.588047
(0.2857142857142856, 0.0, 1.0)
(256, 256, 3)
(256, 256)
predict image
(0.0, 1.0, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.57308996
(0.8571428571428577, 0.0, 1.0)
(256, 256, 3)
(256, 256)
predict image
(0.0, 1.0, 0.5714285714285712)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.6049369
(0.0, 0.0, 1.0)
(256, 256, 3)
(256, 256)
predict image
(0.0, 1.0, 0.8571428571428568)
(256, 256, 3)
(256, 256)
Ground Truth Image
In [ ]:
# Vizualize Ground Truth
In [ ]:
#GT
colors = visualize.random_colors(len(img_meta_obj.annotations))
color = colors[0]
print(color)


masked_image = sample_image.astype(np.uint32).copy()

print(masked_image.shape)
print(sample_mask.shape)

masked_image = visualize.apply_mask(masked_image, sample_mask, color)

visualize.display_images([masked_image])
(1.0, 0.0, 0.2857142857142865)
(256, 256, 3)
(256, 256)
In [ ]:
masked_image = sample_image.astype(np.uint32).copy()
masked_image = visualize.apply_mask(masked_image, pred_mask, color)
visualize.display_images([masked_image])

masked_image = sample_image.astype(np.uint32).copy()
masked_image = visualize.apply_mask(masked_image, sample_mask, color)

visualize.display_images([masked_image])
In [ ]: